comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Use the JSON assert helper in `testutils`
void assertSameJson(String expected, String actual) { ByteArrayOutputStream expectedPretty = new ByteArrayOutputStream(); ByteArrayOutputStream actualPretty = new ByteArrayOutputStream(); JsonFormat formatter = new JsonFormat(false); try { formatter.encode(actualPretty, SlimeUtils.jsonToSlimeOrThrow(actual)); formatter.encode(expectedPretty, SlimeUtils.jsonToSlimeOrThrow(expected)); } catch (IOException e) { throw new UncheckedIOException(e); } assertEquals(expectedPretty.toString(UTF_8), actualPretty.toString(UTF_8)); }
ByteArrayOutputStream expectedPretty = new ByteArrayOutputStream();
void assertSameJson(String expected, String actual) { ByteArrayOutputStream expectedPretty = new ByteArrayOutputStream(); ByteArrayOutputStream actualPretty = new ByteArrayOutputStream(); JsonFormat formatter = new JsonFormat(false); try { formatter.encode(actualPretty, SlimeUtils.jsonToSlimeOrThrow(actual)); formatter.encode(expectedPretty, SlimeUtils.jsonToSlimeOrThrow(expected)); } catch (IOException e) { throw new UncheckedIOException(e); } assertEquals(expectedPretty.toString(UTF_8), actualPretty.toString(UTF_8)); }
class DocumentV1ApiTest { final DocumentmanagerConfig docConfig = Deriver.getDocumentManagerConfig("src/test/cfg/music.sd").build(); final DocumentTypeManager manager = new DocumentTypeManager(docConfig); final Document doc1 = new Document(manager.getDocumentType("music"), "id:space:music::one"); final Document doc2 = new Document(manager.getDocumentType("music"), "id:space:music:n=1:two"); final Document doc3 = new Document(manager.getDocumentType("music"), "id:space:music:g=a:three"); { doc1.setFieldValue("artist", "Tom Waits"); doc2.setFieldValue("artist", "Asa-Chan & Jun-Ray"); } ManualClock clock; DocumentOperationParser parser; LocalDocumentAccess access; DocumentOperationExecutorMock executor; Metric metric; MetricReceiver metrics; DocumentV1ApiHandler handler; @Before public void setUp() { clock = new ManualClock(); parser = new DocumentOperationParser(docConfig); access = new LocalDocumentAccess(new DocumentAccessParams().setDocumentmanagerConfig(docConfig)); executor = new DocumentOperationExecutorMock(); metric = new NullMetric(); metrics = new MetricReceiver.MockReceiver(); handler = new DocumentV1ApiHandler(clock, executor, parser, metric, metrics); } @After public void tearDown() { handler.destroy(); } @Test public void testResponses() { try (RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler)) { var response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/not-found\"," + " \"message\": \"Nothing at '/document/v1/not-found'. 
Available paths are:\\n" + "/document/v1/\\n" + "/document/v1/{namespace}/{documentType}/docid/\\n" + "/document/v1/{namespace}/{documentType}/group/{group}/\\n" + "/document/v1/{namespace}/{documentType}/number/{number}/\\n" + "/document/v1/{namespace}/{documentType}/docid/{docid}\\n" + "/document/v1/{namespace}/{documentType}/group/{group}/{docid}\\n" + "/document/v1/{namespace}/{documentType}/number/{number}/{docid}\"" + "}", response.readAll()); assertEquals("application/json; charset=UTF-8", response.getResponse().headers().getFirst("Content-Type")); assertEquals(404, response.getStatus()); response = driver.sendRequest("http: "&selection=all%20the%20things&fieldSet=[id]&continuation=token"); executor.lastVisitContext().document(doc1); executor.lastVisitContext().document(doc2); executor.lastVisitContext().document(doc3); executor.lastVisitContext().success(Optional.of("token")); assertSameJson("{" + " \"pathId\": \"/document/v1\"," + " \"documents\": [" + " {" + " \"id\": \"id:space:music::one\"," + " \"fields\": {" + " \"artist\": \"Tom Waits\"" + " }" + " }," + " {" + " \"id\": \"id:space:music:n=1:two\"," + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"" + " }" + " }," + " {" + " \"id\": \"id:space:music:g=a:three\"," + " \"fields\": {}" + " }" + " ]," + " \"continuation\": \"token\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(VisitorOptions.builder().cluster("lackluster").bucketSpace("default").wantedDocumentCount(1024) .concurrency(100).selection("all the things").fieldSet("[id]").continuation("token").build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().error(BAD_REQUEST, "nope"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid\"," + " \"documents\": []," + " \"message\": \"nope\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").build(), 
executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().error(ERROR, "error"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/best\"," + " \"documents\": []," + " \"message\": \"error\"" + "}", response.readAll()); assertEquals(500, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").group(Group.of("best")).build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/123\"," + " \"documents\": []" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").group(Group.of(123)).build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"id\": \"id:space:music::one\"" + "}", response.readAll()); assertEquals(404, response.getStatus()); assertEquals(new DocumentGet(doc1.getId()), executor.lastOperation()); assertEquals(parameters().withRoute("route-to-lackluster").withFieldSet("go"), executor.lastParameters()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.of(doc1)); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"id\": \"id:space:music::one\"," + " \"fields\": {" + " \"artist\": \"Tom Waits\"" + " }" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(new DocumentGet(doc1.getId()), executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"" + " }" + "}"); executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": 
\"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); DocumentPut put = new DocumentPut(doc2); put.setCondition(new TestAndSetCondition("test it")); assertEquals(put, executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/a/three\"," + " \"id\": \"id:space:music:g=a:three\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); DocumentUpdate update = new DocumentUpdate(manager.getDocumentType("music"), "id:space:music:g=a:three"); update.addFieldUpdate(FieldUpdate.createAssign(manager.getDocumentType("music").getField("artist"), new StringFieldValue("Lisa Ekdahl"))); update.setCreateIfNonExistent(true); assertEquals(update, executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(new DocumentRemove(doc2.getId()), executor.lastOperation()); assertEquals(parameters().withRoute("route"), executor.lastParameters()); } } }
class DocumentV1ApiTest { final DocumentmanagerConfig docConfig = Deriver.getDocumentManagerConfig("src/test/cfg/music.sd").build(); final DocumentTypeManager manager = new DocumentTypeManager(docConfig); final Document doc1 = new Document(manager.getDocumentType("music"), "id:space:music::one"); final Document doc2 = new Document(manager.getDocumentType("music"), "id:space:music:n=1:two"); final Document doc3 = new Document(manager.getDocumentType("music"), "id:space:music:g=a:three"); { doc1.setFieldValue("artist", "Tom Waits"); doc2.setFieldValue("artist", "Asa-Chan & Jun-Ray"); } ManualClock clock; DocumentOperationParser parser; LocalDocumentAccess access; DocumentOperationExecutorMock executor; Metric metric; MetricReceiver metrics; DocumentV1ApiHandler handler; @Before public void setUp() { clock = new ManualClock(); parser = new DocumentOperationParser(docConfig); access = new LocalDocumentAccess(new DocumentAccessParams().setDocumentmanagerConfig(docConfig)); executor = new DocumentOperationExecutorMock(); metric = new NullMetric(); metrics = new MetricReceiver.MockReceiver(); handler = new DocumentV1ApiHandler(clock, executor, parser, metric, metrics); } @After public void tearDown() { handler.destroy(); } @Test public void testResponses() { RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler); var response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/not-found\"," + " \"message\": \"Nothing at '/document/v1/not-found'. 
Available paths are:\\n" + "/document/v1/\\n" + "/document/v1/{namespace}/{documentType}/docid/\\n" + "/document/v1/{namespace}/{documentType}/group/{group}/\\n" + "/document/v1/{namespace}/{documentType}/number/{number}/\\n" + "/document/v1/{namespace}/{documentType}/docid/{docid}\\n" + "/document/v1/{namespace}/{documentType}/group/{group}/{docid}\\n" + "/document/v1/{namespace}/{documentType}/number/{number}/{docid}\"" + "}", response.readAll()); assertEquals("application/json; charset=UTF-8", response.getResponse().headers().getFirst("Content-Type")); assertEquals(404, response.getStatus()); response = driver.sendRequest("http: "&selection=all%20the%20things&fieldSet=[id]&continuation=token"); executor.lastVisitContext().document(doc1); executor.lastVisitContext().document(doc2); executor.lastVisitContext().document(doc3); executor.lastVisitContext().success(Optional.of("token")); assertSameJson("{" + " \"pathId\": \"/document/v1\"," + " \"documents\": [" + " {" + " \"id\": \"id:space:music::one\"," + " \"fields\": {" + " \"artist\": \"Tom Waits\"" + " }" + " }," + " {" + " \"id\": \"id:space:music:n=1:two\"," + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"" + " }" + " }," + " {" + " \"id\": \"id:space:music:g=a:three\"," + " \"fields\": {}" + " }" + " ]," + " \"continuation\": \"token\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(VisitorOptions.builder().cluster("lackluster").bucketSpace("default").wantedDocumentCount(1024) .concurrency(100).selection("all the things").fieldSet("[id]").continuation("token").build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().error(BAD_REQUEST, "nope"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid\"," + " \"documents\": []," + " \"message\": \"nope\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").build(), 
executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().error(ERROR, "error"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/best\"," + " \"documents\": []," + " \"message\": \"error\"" + "}", response.readAll()); assertEquals(500, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").group(Group.of("best")).build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/123\"," + " \"documents\": []" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").group(Group.of(123)).build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"id\": \"id:space:music::one\"" + "}", response.readAll()); assertEquals(404, response.getStatus()); assertEquals(new DocumentGet(doc1.getId()), executor.lastOperation()); assertEquals(parameters().withRoute("route-to-lackluster").withFieldSet("go"), executor.lastParameters()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.of(doc1)); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"id\": \"id:space:music::one\"," + " \"fields\": {" + " \"artist\": \"Tom Waits\"" + " }" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(new DocumentGet(doc1.getId()), executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: response.readAll(); assertEquals(404, response.getStatus()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"" + " }" + "}"); 
executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); DocumentPut put = new DocumentPut(doc2); put.setCondition(new TestAndSetCondition("test it")); assertEquals(put, executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/a/three\"," + " \"id\": \"id:space:music:g=a:three\"" + "}", response.readAll()); DocumentUpdate update = new DocumentUpdate(doc3.getDataType(), doc3.getId()); update.addFieldUpdate(FieldUpdate.createAssign(doc3.getField("artist"), new StringFieldValue("Lisa Ekdahl"))); update.setCreateIfNonExistent(true); assertEquals(update, executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); assertEquals(200, response.getStatus()); response = driver.sendRequest("http: "{" + " ┻━┻︵ \\(°□°)/ ︵ ┻━┻" + "}"); Inspector responseRoot = SlimeUtils.jsonToSlime(response.readAll()).get(); assertEquals("/document/v1/space/music/number/1/two", responseRoot.field("pathId").asString()); assertTrue(responseRoot.field("message").asString().startsWith("Unexpected character ('┻' (code 9531 / 0x253b)): was expecting double-quote to start field name")); assertEquals(400, response.getStatus()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/house/group/a/three\"," + " \"message\": \"Document type house does not exist\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); response = driver.sendRequest("http: 
executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(new DocumentRemove(doc2.getId()), executor.lastOperation()); assertEquals(parameters().withRoute("route"), executor.lastParameters()); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"throw-me\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); response = driver.sendRequest("http: executor.lastOperationContext().error(TIMEOUT, "timeout"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"timeout\"" + "}", response.readAll()); assertEquals(504, response.getStatus()); response = driver.sendRequest("http: executor.lastOperationContext().error(OVERLOAD, "overload"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"overload\"" + "}", response.readAll()); assertEquals(429, response.getStatus()); response = driver.sendRequest("http: executor.lastOperationContext().error(PRECONDITION_FAILED, "no dice"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"no dice\"" + "}", response.readAll()); assertEquals(412, response.getStatus()); response = driver.sendRequest("http: response.clientClose(); executor.lastOperationContext().error(TIMEOUT, "no dice"); assertEquals("", response.readAll()); assertEquals(504, response.getStatus()); response = driver.sendRequest("https: assertEquals("", response.readAll()); assertEquals(204, response.getStatus()); assertEquals("GET,POST,PUT,DELETE", response.getResponse().headers().getFirst("Allow")); response = 
driver.sendRequest("https: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"message\": \"'PATCH' not allowed at '/document/v1/space/music/docid/one'. Allowed methods are: GET, POST, PUT, DELETE\"" + "}", response.readAll()); assertEquals(405, response.getStatus()); driver.close(); } }
Nice, thanks.
void assertSameJson(String expected, String actual) { ByteArrayOutputStream expectedPretty = new ByteArrayOutputStream(); ByteArrayOutputStream actualPretty = new ByteArrayOutputStream(); JsonFormat formatter = new JsonFormat(false); try { formatter.encode(actualPretty, SlimeUtils.jsonToSlimeOrThrow(actual)); formatter.encode(expectedPretty, SlimeUtils.jsonToSlimeOrThrow(expected)); } catch (IOException e) { throw new UncheckedIOException(e); } assertEquals(expectedPretty.toString(UTF_8), actualPretty.toString(UTF_8)); }
ByteArrayOutputStream expectedPretty = new ByteArrayOutputStream();
void assertSameJson(String expected, String actual) { ByteArrayOutputStream expectedPretty = new ByteArrayOutputStream(); ByteArrayOutputStream actualPretty = new ByteArrayOutputStream(); JsonFormat formatter = new JsonFormat(false); try { formatter.encode(actualPretty, SlimeUtils.jsonToSlimeOrThrow(actual)); formatter.encode(expectedPretty, SlimeUtils.jsonToSlimeOrThrow(expected)); } catch (IOException e) { throw new UncheckedIOException(e); } assertEquals(expectedPretty.toString(UTF_8), actualPretty.toString(UTF_8)); }
class DocumentV1ApiTest { final DocumentmanagerConfig docConfig = Deriver.getDocumentManagerConfig("src/test/cfg/music.sd").build(); final DocumentTypeManager manager = new DocumentTypeManager(docConfig); final Document doc1 = new Document(manager.getDocumentType("music"), "id:space:music::one"); final Document doc2 = new Document(manager.getDocumentType("music"), "id:space:music:n=1:two"); final Document doc3 = new Document(manager.getDocumentType("music"), "id:space:music:g=a:three"); { doc1.setFieldValue("artist", "Tom Waits"); doc2.setFieldValue("artist", "Asa-Chan & Jun-Ray"); } ManualClock clock; DocumentOperationParser parser; LocalDocumentAccess access; DocumentOperationExecutorMock executor; Metric metric; MetricReceiver metrics; DocumentV1ApiHandler handler; @Before public void setUp() { clock = new ManualClock(); parser = new DocumentOperationParser(docConfig); access = new LocalDocumentAccess(new DocumentAccessParams().setDocumentmanagerConfig(docConfig)); executor = new DocumentOperationExecutorMock(); metric = new NullMetric(); metrics = new MetricReceiver.MockReceiver(); handler = new DocumentV1ApiHandler(clock, executor, parser, metric, metrics); } @After public void tearDown() { handler.destroy(); } @Test public void testResponses() { try (RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler)) { var response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/not-found\"," + " \"message\": \"Nothing at '/document/v1/not-found'. 
Available paths are:\\n" + "/document/v1/\\n" + "/document/v1/{namespace}/{documentType}/docid/\\n" + "/document/v1/{namespace}/{documentType}/group/{group}/\\n" + "/document/v1/{namespace}/{documentType}/number/{number}/\\n" + "/document/v1/{namespace}/{documentType}/docid/{docid}\\n" + "/document/v1/{namespace}/{documentType}/group/{group}/{docid}\\n" + "/document/v1/{namespace}/{documentType}/number/{number}/{docid}\"" + "}", response.readAll()); assertEquals("application/json; charset=UTF-8", response.getResponse().headers().getFirst("Content-Type")); assertEquals(404, response.getStatus()); response = driver.sendRequest("http: "&selection=all%20the%20things&fieldSet=[id]&continuation=token"); executor.lastVisitContext().document(doc1); executor.lastVisitContext().document(doc2); executor.lastVisitContext().document(doc3); executor.lastVisitContext().success(Optional.of("token")); assertSameJson("{" + " \"pathId\": \"/document/v1\"," + " \"documents\": [" + " {" + " \"id\": \"id:space:music::one\"," + " \"fields\": {" + " \"artist\": \"Tom Waits\"" + " }" + " }," + " {" + " \"id\": \"id:space:music:n=1:two\"," + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"" + " }" + " }," + " {" + " \"id\": \"id:space:music:g=a:three\"," + " \"fields\": {}" + " }" + " ]," + " \"continuation\": \"token\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(VisitorOptions.builder().cluster("lackluster").bucketSpace("default").wantedDocumentCount(1024) .concurrency(100).selection("all the things").fieldSet("[id]").continuation("token").build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().error(BAD_REQUEST, "nope"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid\"," + " \"documents\": []," + " \"message\": \"nope\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").build(), 
executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().error(ERROR, "error"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/best\"," + " \"documents\": []," + " \"message\": \"error\"" + "}", response.readAll()); assertEquals(500, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").group(Group.of("best")).build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/123\"," + " \"documents\": []" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").group(Group.of(123)).build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"id\": \"id:space:music::one\"" + "}", response.readAll()); assertEquals(404, response.getStatus()); assertEquals(new DocumentGet(doc1.getId()), executor.lastOperation()); assertEquals(parameters().withRoute("route-to-lackluster").withFieldSet("go"), executor.lastParameters()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.of(doc1)); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"id\": \"id:space:music::one\"," + " \"fields\": {" + " \"artist\": \"Tom Waits\"" + " }" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(new DocumentGet(doc1.getId()), executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"" + " }" + "}"); executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": 
\"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); DocumentPut put = new DocumentPut(doc2); put.setCondition(new TestAndSetCondition("test it")); assertEquals(put, executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/a/three\"," + " \"id\": \"id:space:music:g=a:three\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); DocumentUpdate update = new DocumentUpdate(manager.getDocumentType("music"), "id:space:music:g=a:three"); update.addFieldUpdate(FieldUpdate.createAssign(manager.getDocumentType("music").getField("artist"), new StringFieldValue("Lisa Ekdahl"))); update.setCreateIfNonExistent(true); assertEquals(update, executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(new DocumentRemove(doc2.getId()), executor.lastOperation()); assertEquals(parameters().withRoute("route"), executor.lastParameters()); } } }
class DocumentV1ApiTest { final DocumentmanagerConfig docConfig = Deriver.getDocumentManagerConfig("src/test/cfg/music.sd").build(); final DocumentTypeManager manager = new DocumentTypeManager(docConfig); final Document doc1 = new Document(manager.getDocumentType("music"), "id:space:music::one"); final Document doc2 = new Document(manager.getDocumentType("music"), "id:space:music:n=1:two"); final Document doc3 = new Document(manager.getDocumentType("music"), "id:space:music:g=a:three"); { doc1.setFieldValue("artist", "Tom Waits"); doc2.setFieldValue("artist", "Asa-Chan & Jun-Ray"); } ManualClock clock; DocumentOperationParser parser; LocalDocumentAccess access; DocumentOperationExecutorMock executor; Metric metric; MetricReceiver metrics; DocumentV1ApiHandler handler; @Before public void setUp() { clock = new ManualClock(); parser = new DocumentOperationParser(docConfig); access = new LocalDocumentAccess(new DocumentAccessParams().setDocumentmanagerConfig(docConfig)); executor = new DocumentOperationExecutorMock(); metric = new NullMetric(); metrics = new MetricReceiver.MockReceiver(); handler = new DocumentV1ApiHandler(clock, executor, parser, metric, metrics); } @After public void tearDown() { handler.destroy(); } @Test public void testResponses() { RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler); var response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/not-found\"," + " \"message\": \"Nothing at '/document/v1/not-found'. 
Available paths are:\\n" + "/document/v1/\\n" + "/document/v1/{namespace}/{documentType}/docid/\\n" + "/document/v1/{namespace}/{documentType}/group/{group}/\\n" + "/document/v1/{namespace}/{documentType}/number/{number}/\\n" + "/document/v1/{namespace}/{documentType}/docid/{docid}\\n" + "/document/v1/{namespace}/{documentType}/group/{group}/{docid}\\n" + "/document/v1/{namespace}/{documentType}/number/{number}/{docid}\"" + "}", response.readAll()); assertEquals("application/json; charset=UTF-8", response.getResponse().headers().getFirst("Content-Type")); assertEquals(404, response.getStatus()); response = driver.sendRequest("http: "&selection=all%20the%20things&fieldSet=[id]&continuation=token"); executor.lastVisitContext().document(doc1); executor.lastVisitContext().document(doc2); executor.lastVisitContext().document(doc3); executor.lastVisitContext().success(Optional.of("token")); assertSameJson("{" + " \"pathId\": \"/document/v1\"," + " \"documents\": [" + " {" + " \"id\": \"id:space:music::one\"," + " \"fields\": {" + " \"artist\": \"Tom Waits\"" + " }" + " }," + " {" + " \"id\": \"id:space:music:n=1:two\"," + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"" + " }" + " }," + " {" + " \"id\": \"id:space:music:g=a:three\"," + " \"fields\": {}" + " }" + " ]," + " \"continuation\": \"token\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(VisitorOptions.builder().cluster("lackluster").bucketSpace("default").wantedDocumentCount(1024) .concurrency(100).selection("all the things").fieldSet("[id]").continuation("token").build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().error(BAD_REQUEST, "nope"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid\"," + " \"documents\": []," + " \"message\": \"nope\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").build(), 
executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().error(ERROR, "error"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/best\"," + " \"documents\": []," + " \"message\": \"error\"" + "}", response.readAll()); assertEquals(500, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").group(Group.of("best")).build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastVisitContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/123\"," + " \"documents\": []" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(VisitorOptions.builder().namespace("space").documentType("music").group(Group.of(123)).build(), executor.lastOptions()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"id\": \"id:space:music::one\"" + "}", response.readAll()); assertEquals(404, response.getStatus()); assertEquals(new DocumentGet(doc1.getId()), executor.lastOperation()); assertEquals(parameters().withRoute("route-to-lackluster").withFieldSet("go"), executor.lastParameters()); response = driver.sendRequest("http: executor.lastOperationContext().success(Optional.of(doc1)); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"id\": \"id:space:music::one\"," + " \"fields\": {" + " \"artist\": \"Tom Waits\"" + " }" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(new DocumentGet(doc1.getId()), executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: response.readAll(); assertEquals(404, response.getStatus()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"" + " }" + "}"); 
executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); DocumentPut put = new DocumentPut(doc2); put.setCondition(new TestAndSetCondition("test it")); assertEquals(put, executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/a/three\"," + " \"id\": \"id:space:music:g=a:three\"" + "}", response.readAll()); DocumentUpdate update = new DocumentUpdate(doc3.getDataType(), doc3.getId()); update.addFieldUpdate(FieldUpdate.createAssign(doc3.getField("artist"), new StringFieldValue("Lisa Ekdahl"))); update.setCreateIfNonExistent(true); assertEquals(update, executor.lastOperation()); assertEquals(parameters(), executor.lastParameters()); assertEquals(200, response.getStatus()); response = driver.sendRequest("http: "{" + " ┻━┻︵ \\(°□°)/ ︵ ┻━┻" + "}"); Inspector responseRoot = SlimeUtils.jsonToSlime(response.readAll()).get(); assertEquals("/document/v1/space/music/number/1/two", responseRoot.field("pathId").asString()); assertTrue(responseRoot.field("message").asString().startsWith("Unexpected character ('┻' (code 9531 / 0x253b)): was expecting double-quote to start field name")); assertEquals(400, response.getStatus()); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/house/group/a/three\"," + " \"message\": \"Document type house does not exist\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); response = driver.sendRequest("http: 
executor.lastOperationContext().success(Optional.empty()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); assertEquals(new DocumentRemove(doc2.getId()), executor.lastOperation()); assertEquals(parameters().withRoute("route"), executor.lastParameters()); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"throw-me\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); response = driver.sendRequest("http: executor.lastOperationContext().error(TIMEOUT, "timeout"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"timeout\"" + "}", response.readAll()); assertEquals(504, response.getStatus()); response = driver.sendRequest("http: executor.lastOperationContext().error(OVERLOAD, "overload"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"overload\"" + "}", response.readAll()); assertEquals(429, response.getStatus()); response = driver.sendRequest("http: executor.lastOperationContext().error(PRECONDITION_FAILED, "no dice"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"no dice\"" + "}", response.readAll()); assertEquals(412, response.getStatus()); response = driver.sendRequest("http: response.clientClose(); executor.lastOperationContext().error(TIMEOUT, "no dice"); assertEquals("", response.readAll()); assertEquals(504, response.getStatus()); response = driver.sendRequest("https: assertEquals("", response.readAll()); assertEquals(204, response.getStatus()); assertEquals("GET,POST,PUT,DELETE", response.getResponse().headers().getFirst("Allow")); response = 
driver.sendRequest("https: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"message\": \"'PATCH' not allowed at '/document/v1/space/music/docid/one'. Allowed methods are: GET, POST, PUT, DELETE\"" + "}", response.readAll()); assertEquals(405, response.getStatus()); driver.close(); } }
This sounds like a separate PR. Have you checked the history on this one?
/**
 * Builds a single-connection Apache {@link CloseableHttpClient} according to the configured
 * {@code connectionParams}: TLS material comes either from the Vespa environment or from
 * explicitly supplied key/trust stores, an optional proxy is honoured, and the client
 * identifies itself with the Vespa version in both the user agent and a dedicated header.
 */
public CloseableHttpClient createClient() {
    HttpClientBuilder builder;
    if (connectionParams.useTlsConfigFromEnvironment()) {
        // TLS configuration is picked up from the standard Vespa environment.
        builder = VespaHttpClientBuilder.create();
    } else {
        builder = HttpClientBuilder.create();
        if (connectionParams.getSslContext() != null) {
            setSslContext(builder, connectionParams.getSslContext());
        } else {
            // Assemble an SSL context from whatever key/trust material was provided.
            SslContextBuilder sslContextBuilder = new SslContextBuilder();
            if (connectionParams.getPrivateKey() != null && connectionParams.getCertificate() != null) {
                sslContextBuilder.withKeyStore(connectionParams.getPrivateKey(), connectionParams.getCertificate());
            }
            if (connectionParams.getCaCertificates() != null) {
                sslContextBuilder.withTrustStore(connectionParams.getCaCertificates());
            }
            setSslContext(builder, sslContextBuilder.build());
        }
        if (connectionParams.getHostnameVerifier() != null) {
            builder.setSSLHostnameVerifier(connectionParams.getHostnameVerifier());
        }
        // A null user token keeps per-user connection state from restricting connection reuse.
        builder.setUserTokenHandler(context -> null);
    }
    // Limit to a single connection, both per route and in total.
    builder.setMaxConnPerRoute(1);
    builder.setMaxConnTotal(1);
    builder.setUserAgent(String.format("vespa-http-client (%s)", Vtag.V_TAG_COMPONENT));
    builder.setDefaultHeaders(Collections.singletonList(new BasicHeader(Headers.CLIENT_VERSION, Vtag.V_TAG_COMPONENT)));

    RequestConfig.Builder requestConfig = RequestConfig.custom();
    requestConfig.setSocketTimeout(0); // 0 disables the socket (read) timeout
    if (connectionParams.getProxyHost() != null) {
        requestConfig.setProxy(new HttpHost(connectionParams.getProxyHost(), connectionParams.getProxyPort()));
    }
    builder.setDefaultRequestConfig(requestConfig.build());

    log.fine(() -> "Creating HttpClient:" +
                   " ConnectionTimeout " + connectionParams.getConnectionTimeToLive().getSeconds() + " seconds" +
                   " proxyhost (can be null) " + connectionParams.getProxyHost() + ":" + connectionParams.getProxyPort() +
                   (useSsl ? " using ssl " : " not using ssl"));
    return builder.build();
}
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
/**
 * Builds a single-connection Apache {@link CloseableHttpClient} according to the configured
 * {@code connectionParams}: TLS material comes either from the Vespa environment or from
 * explicitly supplied key/trust stores, an optional proxy is honoured, and the client
 * identifies itself with the Vespa version in both the user agent and a dedicated header.
 */
public CloseableHttpClient createClient() {
    HttpClientBuilder builder;
    if (connectionParams.useTlsConfigFromEnvironment()) {
        // TLS configuration is picked up from the standard Vespa environment.
        builder = VespaHttpClientBuilder.create();
    } else {
        builder = HttpClientBuilder.create();
        if (connectionParams.getSslContext() != null) {
            setSslContext(builder, connectionParams.getSslContext());
        } else {
            // Assemble an SSL context from whatever key/trust material was provided.
            SslContextBuilder sslContextBuilder = new SslContextBuilder();
            if (connectionParams.getPrivateKey() != null && connectionParams.getCertificate() != null) {
                sslContextBuilder.withKeyStore(connectionParams.getPrivateKey(), connectionParams.getCertificate());
            }
            if (connectionParams.getCaCertificates() != null) {
                sslContextBuilder.withTrustStore(connectionParams.getCaCertificates());
            }
            setSslContext(builder, sslContextBuilder.build());
        }
        if (connectionParams.getHostnameVerifier() != null) {
            builder.setSSLHostnameVerifier(connectionParams.getHostnameVerifier());
        }
        // A null user token keeps per-user connection state from restricting connection reuse.
        builder.setUserTokenHandler(context -> null);
    }
    // Limit to a single connection, both per route and in total.
    builder.setMaxConnPerRoute(1);
    builder.setMaxConnTotal(1);
    builder.setUserAgent(String.format("vespa-http-client (%s)", Vtag.V_TAG_COMPONENT));
    builder.setDefaultHeaders(Collections.singletonList(new BasicHeader(Headers.CLIENT_VERSION, Vtag.V_TAG_COMPONENT)));

    RequestConfig.Builder requestConfig = RequestConfig.custom();
    requestConfig.setSocketTimeout(0); // 0 disables the socket (read) timeout
    if (connectionParams.getProxyHost() != null) {
        requestConfig.setProxy(new HttpHost(connectionParams.getProxyHost(), connectionParams.getProxyPort()));
    }
    builder.setDefaultRequestConfig(requestConfig.build());

    log.fine(() -> "Creating HttpClient:" +
                   " ConnectionTimeout " + connectionParams.getConnectionTimeToLive().getSeconds() + " seconds" +
                   " proxyhost (can be null) " + connectionParams.getProxyHost() + ":" + connectionParams.getProxyPort() +
                   (useSsl ? " using ssl " : " not using ssl"));
    return builder.build();
}
/**
 * Immutable holder of the settings needed to create HTTP clients:
 * the connection parameters and whether SSL is in use.
 */
class HttpClientFactory {

    final ConnectionParams connectionParams;
    final boolean useSsl;

    public HttpClientFactory(ConnectionParams connectionParams, boolean useSsl) {
        this.useSsl = useSsl;
        this.connectionParams = connectionParams;
    }

}
/**
 * Immutable holder of the settings needed to create HTTP clients:
 * the connection parameters and whether SSL is in use.
 */
class HttpClientFactory {

    final ConnectionParams connectionParams;
    final boolean useSsl;

    public HttpClientFactory(ConnectionParams connectionParams, boolean useSsl) {
        this.useSsl = useSsl;
        this.connectionParams = connectionParams;
    }

}
I'm assuming this is to reduce the amount of load generated by the test?
/**
 * Verifies that the published cluster state version keeps strictly increasing
 * when the master fleet controller is taken down and a new one is elected.
 */
public void testClusterStateVersionIncreasesAcrossMasterElections() throws Exception {
    startingTest("MasterElectionTest::testClusterStateVersionIncreasesAcrossMasterElections");
    FleetControllerOptions options = defaultOptions("mycluster");
    options.masterZooKeeperCooldownPeriod = 1;
    setUpFleetController(3, false, options);
    setUpVdsNodes(false, new DummyVdsNodeOptions());
    fleetController = fleetControllers.get(0);
    waitForStableSystem();
    waitForMaster(0);
    // Let every controller finish a full cycle before snapshotting the baseline state.
    for (int i = 0; i <= 2; i++) {
        waitForCompleteCycle(i);
    }
    StrictlyIncreasingVersionChecker versionChecker =
            StrictlyIncreasingVersionChecker.bootstrappedWith(fleetControllers.get(0).getClusterState());
    // Force a re-election by shutting down the current master (controller 0).
    fleetControllers.get(0).shutdown();
    waitForMaster(1);
    for (int i = 1; i <= 2; i++) {
        waitForCompleteCycle(i);
    }
    versionChecker.updateAndVerify(fleetControllers.get(1).getClusterState());
}
setUpFleetController(3, false, options);
/**
 * Verifies that the published cluster state version keeps strictly increasing
 * when the master fleet controller is taken down and a new one is elected.
 */
public void testClusterStateVersionIncreasesAcrossMasterElections() throws Exception {
    startingTest("MasterElectionTest::testClusterStateVersionIncreasesAcrossMasterElections");
    FleetControllerOptions options = defaultOptions("mycluster");
    options.masterZooKeeperCooldownPeriod = 1;
    setUpFleetController(3, false, options);
    setUpVdsNodes(false, new DummyVdsNodeOptions());
    fleetController = fleetControllers.get(0);
    waitForStableSystem();
    waitForMaster(0);
    // Let every controller finish a full cycle before snapshotting the baseline state.
    for (int i = 0; i <= 2; i++) {
        waitForCompleteCycle(i);
    }
    StrictlyIncreasingVersionChecker versionChecker =
            StrictlyIncreasingVersionChecker.bootstrappedWith(fleetControllers.get(0).getClusterState());
    // Force a re-election by shutting down the current master (controller 0).
    fleetControllers.get(0).shutdown();
    waitForMaster(1);
    for (int i = 1; i <= 2; i++) {
        waitForCompleteCycle(i);
    }
    versionChecker.updateAndVerify(fleetControllers.get(1).getClusterState());
}
/**
 * Tracks observed cluster states and fails if their versions are not
 * strictly increasing.
 */
class StrictlyIncreasingVersionChecker {

    // Most recently observed state; the next version must exceed this one's.
    private ClusterState lastState;

    private StrictlyIncreasingVersionChecker(ClusterState initialState) {
        this.lastState = initialState;
    }

    /** Creates a checker seeded with the given baseline state. */
    static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) {
        return new StrictlyIncreasingVersionChecker(initialState);
    }

    /**
     * Records the given state and throws {@link IllegalStateException} if its
     * version does not strictly exceed the previously recorded version.
     */
    void updateAndVerify(ClusterState currentState) {
        final ClusterState previous = lastState;
        lastState = currentState; // record the new state before verifying, as callers expect
        if (currentState.getVersion() > previous.getVersion()) {
            return; // invariant holds
        }
        throw new IllegalStateException(
                String.format("Cluster state version strict increase invariant broken! " +
                              "Old state was '%s', new state is '%s'", previous, currentState));
    }
}
/**
 * Tracks observed cluster states and fails if their versions are not
 * strictly increasing.
 */
class StrictlyIncreasingVersionChecker {

    // Most recently observed state; the next version must exceed this one's.
    private ClusterState lastState;

    private StrictlyIncreasingVersionChecker(ClusterState initialState) {
        this.lastState = initialState;
    }

    /** Creates a checker seeded with the given baseline state. */
    static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) {
        return new StrictlyIncreasingVersionChecker(initialState);
    }

    /**
     * Records the given state and throws {@link IllegalStateException} if its
     * version does not strictly exceed the previously recorded version.
     */
    void updateAndVerify(ClusterState currentState) {
        final ClusterState previous = lastState;
        lastState = currentState; // record the new state before verifying, as callers expect
        if (currentState.getVersion() > previous.getVersion()) {
            return; // invariant holds
        }
        throw new IllegalStateException(
                String.format("Cluster state version strict increase invariant broken! " +
                              "Old state was '%s', new state is '%s'", previous, currentState));
    }
}
Yes, and we usually have 3, not 5, controllers. But this is not important; I think I just tried it out.
/**
 * Verifies that the published cluster state version keeps strictly increasing
 * when the master fleet controller is taken down and a new one is elected.
 */
public void testClusterStateVersionIncreasesAcrossMasterElections() throws Exception {
    startingTest("MasterElectionTest::testClusterStateVersionIncreasesAcrossMasterElections");
    FleetControllerOptions options = defaultOptions("mycluster");
    options.masterZooKeeperCooldownPeriod = 1;
    setUpFleetController(3, false, options);
    setUpVdsNodes(false, new DummyVdsNodeOptions());
    fleetController = fleetControllers.get(0);
    waitForStableSystem();
    waitForMaster(0);
    // Let every controller finish a full cycle before snapshotting the baseline state.
    for (int i = 0; i <= 2; i++) {
        waitForCompleteCycle(i);
    }
    StrictlyIncreasingVersionChecker versionChecker =
            StrictlyIncreasingVersionChecker.bootstrappedWith(fleetControllers.get(0).getClusterState());
    // Force a re-election by shutting down the current master (controller 0).
    fleetControllers.get(0).shutdown();
    waitForMaster(1);
    for (int i = 1; i <= 2; i++) {
        waitForCompleteCycle(i);
    }
    versionChecker.updateAndVerify(fleetControllers.get(1).getClusterState());
}
setUpFleetController(3, false, options);
/**
 * Verifies that the published cluster state version keeps strictly increasing
 * when the master fleet controller is taken down and a new one is elected.
 */
public void testClusterStateVersionIncreasesAcrossMasterElections() throws Exception {
    startingTest("MasterElectionTest::testClusterStateVersionIncreasesAcrossMasterElections");
    FleetControllerOptions options = defaultOptions("mycluster");
    options.masterZooKeeperCooldownPeriod = 1;
    setUpFleetController(3, false, options);
    setUpVdsNodes(false, new DummyVdsNodeOptions());
    fleetController = fleetControllers.get(0);
    waitForStableSystem();
    waitForMaster(0);
    // Let every controller finish a full cycle before snapshotting the baseline state.
    for (int i = 0; i <= 2; i++) {
        waitForCompleteCycle(i);
    }
    StrictlyIncreasingVersionChecker versionChecker =
            StrictlyIncreasingVersionChecker.bootstrappedWith(fleetControllers.get(0).getClusterState());
    // Force a re-election by shutting down the current master (controller 0).
    fleetControllers.get(0).shutdown();
    waitForMaster(1);
    for (int i = 1; i <= 2; i++) {
        waitForCompleteCycle(i);
    }
    versionChecker.updateAndVerify(fleetControllers.get(1).getClusterState());
}
/**
 * Tracks observed cluster states and fails if their versions are not
 * strictly increasing.
 */
class StrictlyIncreasingVersionChecker {

    // Most recently observed state; the next version must exceed this one's.
    private ClusterState lastState;

    private StrictlyIncreasingVersionChecker(ClusterState initialState) {
        this.lastState = initialState;
    }

    /** Creates a checker seeded with the given baseline state. */
    static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) {
        return new StrictlyIncreasingVersionChecker(initialState);
    }

    /**
     * Records the given state and throws {@link IllegalStateException} if its
     * version does not strictly exceed the previously recorded version.
     */
    void updateAndVerify(ClusterState currentState) {
        final ClusterState previous = lastState;
        lastState = currentState; // record the new state before verifying, as callers expect
        if (currentState.getVersion() > previous.getVersion()) {
            return; // invariant holds
        }
        throw new IllegalStateException(
                String.format("Cluster state version strict increase invariant broken! " +
                              "Old state was '%s', new state is '%s'", previous, currentState));
    }
}
/**
 * Tracks observed cluster states and fails if their versions are not
 * strictly increasing.
 */
class StrictlyIncreasingVersionChecker {

    // Most recently observed state; the next version must exceed this one's.
    private ClusterState lastState;

    private StrictlyIncreasingVersionChecker(ClusterState initialState) {
        this.lastState = initialState;
    }

    /** Creates a checker seeded with the given baseline state. */
    static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) {
        return new StrictlyIncreasingVersionChecker(initialState);
    }

    /**
     * Records the given state and throws {@link IllegalStateException} if its
     * version does not strictly exceed the previously recorded version.
     */
    void updateAndVerify(ClusterState currentState) {
        final ClusterState previous = lastState;
        lastState = currentState; // record the new state before verifying, as callers expect
        if (currentState.getVersion() > previous.getVersion()) {
            return; // invariant holds
        }
        throw new IllegalStateException(
                String.format("Cluster state version strict increase invariant broken! " +
                              "Old state was '%s', new state is '%s'", previous, currentState));
    }
}
Hehe, yes ...
/**
 * Ensures there is a state watcher for the given session: creates one (backed by a
 * file cache on the session's state path) on first sight, otherwise points the
 * existing watcher at the new remote session object.
 */
private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) {
    SessionStateWatcher existingWatcher = sessionStateWatchers.get(sessionId);
    if (existingWatcher != null) {
        // Already watching this session's state node; just swap in the new session object.
        existingWatcher.updateRemoteSession(remoteSession);
        return;
    }
    Curator.FileCache stateFileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
    stateFileCache.addListener(this::nodeChanged);
    sessionStateWatchers.put(sessionId,
                             new SessionStateWatcher(stateFileCache, remoteSession, metrics, zkWatcherExecutor, this));
}
sessionStateWatcher.updateRemoteSession(remoteSession);
/**
 * Ensures there is a state watcher for the given session: creates one (backed by a
 * file cache on the session's state path) on first sight, otherwise points the
 * existing watcher at the new remote session object.
 */
private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) {
    SessionStateWatcher existingWatcher = sessionStateWatchers.get(sessionId);
    if (existingWatcher != null) {
        // Already watching this session's state node; just swap in the new session object.
        existingWatcher.updateRemoteSession(remoteSession);
        return;
    }
    Curator.FileCache stateFileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
    stateFileCache.addListener(this::nodeChanged);
    sessionStateWatchers.put(sessionId,
                             new SessionStateWatcher(stateFileCache, remoteSession, metrics, zkWatcherExecutor, this));
}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Map<Long, LocalSession> localSessionCache = new ConcurrentHashMap<>(); private final Map<Long, RemoteSession> remoteSessionCache = new ConcurrentHashMap<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); if (remoteSessionCache.get(sessionId) == null) { createRemoteSession(sessionId); } } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } public Collection<LocalSession> getLocalSessions() { return localSessionCache.values(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = 
getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivate(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int 
deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, () -> "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.values()) if ( ! sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
* * @param sessionId session id for the new session */ public synchronized void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session " + sessionId); RemoteSession session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } if (distributeApplicationPackage()) createLocalSessionUsingDistributedApplicationPackage(sessionId); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + session); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void delete(RemoteSession remoteSession) { long sessionId = remoteSession.getSessionId(); log.log(Level.INFO, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId); deactivate(remoteSession); deleteRemoteSessionFromZooKeeper(remoteSession); remoteSessionCache.remove(sessionId); LocalSession localSession = getLocalSession(sessionId); if (localSession != null) { log.log(Level.INFO, () -> localSession.logPre() + "Deleting local session " + sessionId); deleteLocalSession(localSession); } } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); RemoteSession session = remoteSessionCache.remove(sessionId); if (session != null) { deactivate(session); } metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } ApplicationSet applicationSet = loadApplication(session); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); 
} catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, 
                            // NOTE(review): this chunk starts mid-signature — the method head
                            // `private void childEvent(CuratorFramework ignored, ...` is above the visible range.
                            // Curator directory-cache callback: re-scan sessions on child add/remove or reconnect,
                            // dispatched on the ZK watcher executor to keep the Curator callback thread free.
                            PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        Optional<Long> activeSessionId = applicationRepo.activeSessionOf(applicationId);
        return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget);
    }

    /**
     * Creates a remote session for the given id, caches it, loads its application if it is the
     * active one, and attaches a state watcher. Synchronized so cache and watcher stay consistent.
     */
    public synchronized RemoteSession createRemoteSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        remoteSessionCache.put(sessionId, session);
        loadSessionIfActive(session);
        updateSessionStateWatcher(sessionId, session);
        return session;
    }

    /** Guards against reusing a session id: fails if the session's ZooKeeper path already exists. */
    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    /**
     * Builds a FilesApplicationPackage for the copied application dir, attaching deploy metadata
     * (deploying user, source dir, timestamp, session ids).
     */
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) { user = "unknown"; }  // USER env var may be unset (e.g. non-interactive contexts)
        // nonExistingActiveSessionId (0) is the sentinel for "no currently active session"
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    /**
     * Creates the session node in ZooKeeper and waits (within the timeout budget) for the
     * upload to be acknowledged before returning the local session.
     */
    private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage,
                                                      long sessionId,
                                                      TimeoutBudget timeoutBudget,
                                                      Clock clock) {
        log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        sessionZKClient.createNewSession(clock.instant());
        Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        waiter.awaitCompletion(timeoutBudget.timeLeft());
        return session;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param logger a deploy logger where the deploy log will be written.
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession,
                                                  DeployLogger logger,
                                                  boolean internalRedeploy,
                                                  TimeoutBudget timeoutBudget) {
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId);
        logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId);
        LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget);
        // Carry over identity and deployment settings from the base session
        session.setApplicationId(existingApplicationId);
        if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) {
            session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        }
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /** Allocates the next session id, stages the application package, and creates the session (with ZK node). */
    private LocalSession create(File applicationFile, ApplicationId applicationId,
                                Optional<Long> currentlyActiveSessionId, boolean internalRedeploy,
                                TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy);
            return createSessionFromApplication(app, sessionId, timeoutBudget, clock);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /**
     * This method is used when creating a session based on a remote session and the distributed application package
     * It does not wait for session being created on other servers
     */
    private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) {
        try {
            Optional<Long> currentlyActiveSessionId = getActiveSessionId(applicationId);
            ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId,
                                                                             currentlyActiveSessionId, false);
            SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
            return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /** Copies the application into the session dir, builds the package, and persists its metadata. */
    private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId,
                                                        long sessionId, Optional<Long> currentlyActiveSessionId,
                                                        boolean internalRedeploy) throws IOException {
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationFile, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  sessionId,
                                                                  currentlyActiveSessionId,
                                                                  internalRedeploy);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }

    /**
     * Returns the currently active application set for the given application, or empty if there is none.
     * The empty catch is deliberate: requireActiveSessionOf throws IllegalArgumentException when no
     * session is active, which we translate to Optional.empty().
     */
    public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
        Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
        try {
            long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId);
            RemoteSession currentActiveSession = getRemoteSession(currentActiveSessionId);
            currentActiveApplicationSet = Optional.ofNullable(ensureApplicationLoaded(currentActiveSession));
        } catch (IllegalArgumentException e) {
            // Intentionally ignored: means no active session for this application
        }
        return currentActiveApplicationSet;
    }

    /**
     * Copies the application into place atomically: copy into a temp sibling dir first, then
     * ATOMIC_MOVE into the destination, so readers never observe a half-copied package.
     * The temp dir is always cleaned up (a no-op after a successful move).
     */
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists())
            throw new RuntimeException("Destination dir " + destinationDir + " already exists");
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");

        // Copy to a temporary dir within the same parent so the move can be atomic
        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // In case some exception happened before the move (or the move failed), clean up the temp dir
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    LocalSession createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary
     */
    public void createLocalSessionUsingDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.hasLocalSession(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            // NOTE(review): the session returned here is discarded and not added to the cache —
            // looks intentional (it already exists locally), but verify against callers.
            createSessionFromId(sessionId);
            return;
        }

        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // We cannot be guaranteed that the file reference exists (it could be deleted
                // or not yet distributed to this server), so just log and bail out
                log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId);
            addLocalSession(localSession);
        }
    }

    /** Returns the active session id for the application, or empty if it is not an active application. */
    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() {
        return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId();
    }

    /** ZooKeeper path for the given session under this tenant's sessions node. */
    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = componentRegistry.getConfigserverConfig().serverId();
        return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId);
    }

    /** Returns the session's application dir on disk, failing if it does not exist or is not a directory. */
    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId);
    }

    @Override
    public String toString() {
        return getLocalSessions().toString();
    }

    public Clock clock() { return clock; }

    /** Transaction that marks the session ACTIVATE in ZK and records it as the application's active session. */
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    private Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    /** A Transaction over FileOperations; prepare is a no-op, commit runs each file operation. */
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {

        void commit();

    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
/**
 * Manages the deployment sessions (local on-disk sessions and remote ZooKeeper-backed sessions)
 * for a single tenant. Keeps in-memory caches of both kinds of session, watches the tenant's
 * sessions node in ZooKeeper (via a Curator directory cache) to react to sessions created on
 * other config servers, and handles session lifecycle: create, prepare, activate, expire, delete.
 *
 * NOTE(review): this chunk is truncated — the class continues past the visible range
 * (it ends mid-expression inside getActiveSessionId below).
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session app dirs on disk are named by their numeric session id
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel meaning "no currently active session"
    private static final long nonExistingActiveSessionId = 0;

    private final Map<Long, LocalSession> localSessionCache = new ConcurrentHashMap<>();
    private final Map<Long, RemoteSession> remoteSessionCache = new ConcurrentHashMap<>();
    // Guarded by synchronized methods and the single-threaded watcher executor —
    // NOTE(review): plain HashMap, unlike the concurrent session caches; confirm access is confined.
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>();
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final BooleanFlag distributeApplicationPackage;
    private final MetricUpdater metrics;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final GlobalComponentRegistry componentRegistry;
    private final Path locksPath;

    public SessionRepository(TenantName tenantName,
                             GlobalComponentRegistry componentRegistry,
                             TenantApplications applicationRepo,
                             FlagSource flagSource,
                             SessionPreparer sessionPreparer) {
        this.tenantName = tenantName;
        this.componentRegistry = componentRegistry;
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = componentRegistry.getClock();
        this.curator = componentRegistry.getCurator();
        this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime());
        // All ZK watcher callbacks for this tenant run through the shared watcher executor
        this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command);
        this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource);
        this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.locksPath = TenantRepository.getLocksPath(tenantName);
        // Load existing sessions before starting the directory cache, so childEvent sees a populated state
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor());
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    /** Loads local (on-disk) sessions, then remote (ZooKeeper) sessions. */
    private void loadSessions() {
        loadLocalSessions();
        initializeRemoteSessions();
    }

    /** Adds a local session to the cache, creating the matching remote session if it is missing. */
    public synchronized void addLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        localSessionCache.put(sessionId, session);
        if (remoteSessionCache.get(sessionId) == null) {
            createRemoteSession(sessionId);
        }
    }

    public LocalSession getLocalSession(long sessionId) {
        return localSessionCache.get(sessionId);
    }

    public Collection<LocalSession> getLocalSessions() {
        return localSessionCache.values();
    }

    /** Loads every numeric-named session dir from disk; unloadable sessions are logged and skipped. */
    private void loadLocalSessions() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return;  // sessions dir missing or unreadable

        for (File session : sessions) {
            try {
                addLocalSession(createSessionFromId(Long.parseLong(session.getName())));
            } catch (IllegalArgumentException e) {
                log.log(Level.WARNING, "Could not load session '" +
                        session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it.");
            }
        }
    }

    /**
     * Prepares the session's application (model building, validation) and waits for the prepare
     * to complete on the other config servers within the params' timeout budget.
     *
     * @return the config change actions resulting from preparing this application
     */
    public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params,
                                                   Path tenantPath, Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, tenantPath, now,
                                                              getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Deletes expired, non-active local sessions, and local sessions older than one day whose id
     * is not the active session for their application. Catches Throwable so a single bad session
     * cannot break the periodic purge.
     *
     * @param activeSessions map from application id to its currently active session id
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        try {
            for (LocalSession candidate : localSessionCache.values()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);

                // Sessions with state other than ACTIVATE
                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    deleteLocalSession(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    // Sessions that are older than one day, where the session id is not the
                    // application's active session id
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        deleteLocalSession(candidate);
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }
        } catch (Throwable e) {
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    private boolean hasExpired(LocalSession candidate) {
        return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()));
    }

    private boolean isActiveSession(LocalSession candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    /** Removes the session from cache and watcher, then deletes its application dir on disk. */
    public void deleteLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Deleting local session " + sessionId);
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        localSessionCache.remove(sessionId);
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    /** Tears down this repository: deletes all sessions and dirs, closes the directory cache and watchers. */
    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // Empty session list => every cached remote session is treated as removed
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    private void deleteAllSessions() {
        // Copy values first: deleteLocalSession mutates localSessionCache while we iterate
        List<LocalSession> sessions = new ArrayList<>(localSessionCache.values());
        for (LocalSession session : sessions) {
            deleteLocalSession(session);
        }
    }

    public RemoteSession getRemoteSession(long sessionId) {
        return remoteSessionCache.get(sessionId);
    }

    /** Session ids read directly from ZooKeeper (not from the in-memory cache). */
    public List<Long> getRemoteSessions() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    /**
     * Deletes remote sessions that are not active and whose create time is older than expiryTime.
     *
     * @return the number of sessions deleted
     */
    public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (long sessionId : getRemoteSessions()) {
            RemoteSession session = remoteSessionCache.get(sessionId);
            if (session == null) continue;  // Internal sessions not in sync with zk, continue
            if (session.getStatus() == Session.Status.ACTIVATE) continue;
            if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
                log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
                deleteRemoteSessionFromZooKeeper(session);
                deleted++;
            }
        }
        return deleted;
    }

    /** Replaces the cached remote session with its deactivated counterpart. */
    public void deactivate(RemoteSession remoteSession) {
        RemoteSession session = remoteSession.deactivated();
        remoteSessionCache.put(session.getSessionId(), session);
    }

    public void deleteRemoteSessionFromZooKeeper(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    /**
     * Deletes lock nodes under this tenant's locks path whose ZK creation time is older than
     * expiryTime. Locks with no readable creation time are treated as just created (kept).
     *
     * @return the number of locks deleted
     */
    public int deleteExpiredLocks(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (var lock : curator.getChildren(locksPath)) {
            Path path = locksPath.append(lock);
            if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) {
                log.log(Level.FINE, () -> "Lock " + path + " has expired, deleting it");
                curator.delete(path);
                deleted++;
            }
        }
        return deleted;
    }

    /** The node's ZK creation time (ctime), or empty if the node/stat is unavailable. */
    private Optional<Instant> zooKeeperNodeCreated(Path path) {
        return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime()));
    }

    private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
        return (created.plus(expiryTime).isBefore(clock.instant()));
    }

    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    private void initializeRemoteSessions() throws NumberFormatException {
        getRemoteSessions().forEach(this::sessionAdded);
    }

    /** Reconciles the remote session cache against the directory cache's current children. */
    private synchronized void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    private void checkForRemovedSessions(List<Long> sessions) {
        for (RemoteSession session : remoteSessionCache.values())
            if ( ! sessions.contains(session.getSessionId()))
                sessionRemoved(session.getSessionId());
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public synchronized void sessionAdded(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return;  // session marked deleted; skip

        log.log(Level.FINE, () -> "Adding remote session " + sessionId);
        RemoteSession session = createRemoteSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        if (distributeApplicationPackage())
            createLocalSessionUsingDistributedApplicationPackage(sessionId);
    }

    /** Loads the session's application, reloads config, and notifies the activation waiter. */
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + session);
        ApplicationSet app = ensureApplicationLoaded(session);
        log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId);
        applicationRepo.reloadConfig(app);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    /** Deactivates and deletes a remote session, along with any corresponding local session. */
    public void delete(RemoteSession remoteSession) {
        long sessionId = remoteSession.getSessionId();
        log.log(Level.INFO, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId);
        deactivate(remoteSession);
        deleteRemoteSessionFromZooKeeper(remoteSession);
        remoteSessionCache.remove(sessionId);
        LocalSession localSession = getLocalSession(sessionId);
        if (localSession != null) {
            log.log(Level.INFO, () -> localSession.logPre() + "Deleting local session " + sessionId);
            deleteLocalSession(localSession);
        }
    }

    /** Whether distribution of application packages via file references is enabled (feature flag). */
    boolean distributeApplicationPackage() {
        return distributeApplicationPackage.value();
    }

    /** Handles a session disappearing from ZooKeeper: close watcher, evict cache entry, bump metric. */
    private void sessionRemoved(long sessionId) {
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        RemoteSession session = remoteSessionCache.remove(sessionId);
        if (session != null) {
            deactivate(session);
        }
        metrics.incRemovedSessions();
    }

    /** If this session is the active session of some application, loads and reloads its config. */
    private void loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.reloadConfig(ensureApplicationLoaded(session));
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId +
                                    " (generation " + session.getSessionId() + ")");
                return;
            }
        }
    }

    /** Loads the session's application and notifies the prepare waiter. */
    void prepareRemoteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter, session);
    }

    /**
     * Returns the session's application set, loading it (and caching the activated session)
     * if it is not already loaded.
     */
    public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
        if (session.applicationSet().isPresent()) {
            return session.applicationSet().get();
        }
        ApplicationSet applicationSet = loadApplication(session);
        RemoteSession activated = session.activated(applicationSet);
        long sessionId = activated.getSessionId();
        remoteSessionCache.put(sessionId, activated);
        updateSessionStateWatcher(sessionId, activated);
        return applicationSet;
    }

    /** Notifies the session's upload waiter that this server has seen the upload. */
    void confirmUpload(RemoteSession session) {
        Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter, session);
        log.log(Level.FINE, "Done notifying upload for session " + sessionId);
    }

    /**
     * Notifies the completion waiter, tolerating the benign races where the waiter's ZK node has
     * already been deleted (NoNodeException) or already created (NodeExistsException); any other
     * failure is rethrown.
     */
    void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            // Throw only if we get something else than the expected underlying Keeper exceptions
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                              KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() +
                                    " (" + completionWaiter + ")," +
                                    " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                        ? "has been deleted"
                        : "already exists"));
            else
                throw e;
        }
    }

    /** Builds the application models for the session from its ZooKeeper-stored application package. */
    private ApplicationSet loadApplication(RemoteSession session) {
        log.log(Level.FINE, () -> "Loading application for " + session);
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                    session.getSessionId(),
                                                                    sessionZooKeeperClient,
                                                                    componentRegistry);
        // Read hosts allocated on the config server instance which created this
        SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts());
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage,
                                                           allocatedHosts,
                                                           clock.instant()));
    }

    /** Recomputes per-status session count metrics, on the watcher executor. */
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            for (RemoteSession session : remoteSessionCache.values()) {
                sessionMetrics.add(session.getStatus());
            }
            metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    /** Curator directory-cache callback: re-scan sessions on child add/remove or reconnect. */
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        Optional<Long> activeSessionId = applicationRepo.activeSessionOf(applicationId);
        return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget);
    }

    /**
     * Creates a remote session for the given id, caches it, loads its application if it is the
     * active one, and attaches a state watcher. Synchronized so cache and watcher stay consistent.
     */
    public synchronized RemoteSession createRemoteSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        remoteSessionCache.put(sessionId, session);
        loadSessionIfActive(session);
        updateSessionStateWatcher(sessionId, session);
        return session;
    }

    /** Guards against reusing a session id: fails if the session's ZooKeeper path already exists. */
    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    /**
     * Builds a FilesApplicationPackage for the copied application dir, attaching deploy metadata
     * (deploying user, source dir, timestamp, session ids).
     */
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) { user = "unknown"; }  // USER env var may be unset
        // nonExistingActiveSessionId (0) is the sentinel for "no currently active session"
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    /**
     * Creates the session node in ZooKeeper and waits (within the timeout budget) for the
     * upload to be acknowledged before returning the local session.
     */
    private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage,
                                                      long sessionId,
                                                      TimeoutBudget timeoutBudget,
                                                      Clock clock) {
        log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        sessionZKClient.createNewSession(clock.instant());
        Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        waiter.awaitCompletion(timeoutBudget.timeLeft());
        return session;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param logger a deploy logger where the deploy log will be written.
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession,
                                                  DeployLogger logger,
                                                  boolean internalRedeploy,
                                                  TimeoutBudget timeoutBudget) {
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId);
        logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId);
        LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget);
        // Carry over identity and deployment settings from the base session
        session.setApplicationId(existingApplicationId);
        if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) {
            session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        }
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /** Allocates the next session id, stages the application package, and creates the session (with ZK node). */
    private LocalSession create(File applicationFile, ApplicationId applicationId,
                                Optional<Long> currentlyActiveSessionId, boolean internalRedeploy,
                                TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy);
            return createSessionFromApplication(app, sessionId, timeoutBudget, clock);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /**
     * This method is used when creating a session based on a remote session and the distributed application package
     * It does not wait for session being created on other servers
     */
    private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) {
        try {
            Optional<Long> currentlyActiveSessionId = getActiveSessionId(applicationId);
            ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId,
                                                                             currentlyActiveSessionId, false);
            SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
            return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /** Copies the application into the session dir, builds the package, and persists its metadata. */
    private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId,
                                                        long sessionId, Optional<Long> currentlyActiveSessionId,
                                                        boolean internalRedeploy) throws IOException {
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationFile, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  sessionId,
                                                                  currentlyActiveSessionId,
                                                                  internalRedeploy);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }

    /**
     * Returns the currently active application set for the given application, or empty if there is none.
     * The empty catch is deliberate: requireActiveSessionOf throws IllegalArgumentException when no
     * session is active, which we translate to Optional.empty().
     */
    public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
        Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
        try {
            long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId);
            RemoteSession currentActiveSession = getRemoteSession(currentActiveSessionId);
            currentActiveApplicationSet = Optional.ofNullable(ensureApplicationLoaded(currentActiveSession));
        } catch (IllegalArgumentException e) {
            // Intentionally ignored: means no active session for this application
        }
        return currentActiveApplicationSet;
    }

    /**
     * Copies the application into place atomically: copy into a temp sibling dir first, then
     * ATOMIC_MOVE into the destination, so readers never observe a half-copied package.
     * The temp dir is always cleaned up (a no-op after a successful move).
     */
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists())
            throw new RuntimeException("Destination dir " + destinationDir + " already exists");
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");

        // Copy to a temporary dir within the same parent so the move can be atomic
        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // In case some exception happened before the move (or the move failed), clean up the temp dir
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    LocalSession createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary
     */
    public void createLocalSessionUsingDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.hasLocalSession(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            // NOTE(review): the session returned here is discarded and not added to the cache —
            // looks intentional (it already exists locally), but verify against callers.
            createSessionFromId(sessionId);
            return;
        }

        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // The file reference may not yet be distributed to this server (or was deleted); just log and bail out
                log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId);
            addLocalSession(localSession);
        }
    }

    /** Returns the active session id for the application, or empty if it is not an active application.
     *  NOTE(review): method is truncated at the end of this chunk; continuation follows outside the visible range. */
    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId) ?
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new 
FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
Consider catching the particular subclass of IOException thrown when the file already exists (FileAlreadyExistsException?), and only try deletion etc then.
void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { String message = "Failed writing to file (" + inprogressFile.toPath() + "): "; log.log(Level.SEVERE, message + e.getMessage(), e); boolean successfulDelete = inprogressFile.delete(); if ( ! successfulDelete) log.log(Level.INFO, "Unable to delete " + inprogressFile.toPath()); throw new RuntimeException(message, e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); }
log.log(Level.SEVERE, message + e.getMessage(), e);
void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { String message = "Failed writing to file (" + inprogressFile.toPath() + "): "; log.log(Level.SEVERE, message + e.getMessage(), e); boolean successfulDelete = inprogressFile.delete(); if ( ! successfulDelete) log.log(Level.INFO, "Unable to delete " + inprogressFile.toPath()); throw new RuntimeException(message, e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private final long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(Level.SEVERE, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(Level.SEVERE, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new 
RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(Level.FINE, () -> "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(Level.SEVERE, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } finally { try { if (inprogressFile.exists()) { Files.delete(inprogressFile.toPath()); } } catch (IOException e) { log.log(Level.SEVERE, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } } return file; } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private final long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(Level.SEVERE, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(Level.SEVERE, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new 
RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(Level.FINE, () -> "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(Level.SEVERE, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } finally { try { if (inprogressFile.exists()) { Files.delete(inprogressFile.toPath()); } } catch (IOException e) { log.log(Level.SEVERE, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } } return file; } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
If we fail to write to the file we need to cleanup (we append to the file, so if it fails we have no idea of what has been written). So the change is just logging if we were unable to delete the file, we always need to delete no matter what the IOException was.
void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { String message = "Failed writing to file (" + inprogressFile.toPath() + "): "; log.log(Level.SEVERE, message + e.getMessage(), e); boolean successfulDelete = inprogressFile.delete(); if ( ! successfulDelete) log.log(Level.INFO, "Unable to delete " + inprogressFile.toPath()); throw new RuntimeException(message, e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); }
log.log(Level.SEVERE, message + e.getMessage(), e);
void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { String message = "Failed writing to file (" + inprogressFile.toPath() + "): "; log.log(Level.SEVERE, message + e.getMessage(), e); boolean successfulDelete = inprogressFile.delete(); if ( ! successfulDelete) log.log(Level.INFO, "Unable to delete " + inprogressFile.toPath()); throw new RuntimeException(message, e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private final long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(Level.SEVERE, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(Level.SEVERE, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new 
RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(Level.FINE, () -> "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(Level.SEVERE, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } finally { try { if (inprogressFile.exists()) { Files.delete(inprogressFile.toPath()); } } catch (IOException e) { log.log(Level.SEVERE, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } } return file; } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private final long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(Level.SEVERE, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(Level.SEVERE, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new 
RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(Level.FINE, () -> "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(Level.SEVERE, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } finally { try { if (inprogressFile.exists()) { Files.delete(inprogressFile.toPath()); } } catch (IOException e) { log.log(Level.SEVERE, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } } return file; } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
👍
void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { String message = "Failed writing to file (" + inprogressFile.toPath() + "): "; log.log(Level.SEVERE, message + e.getMessage(), e); boolean successfulDelete = inprogressFile.delete(); if ( ! successfulDelete) log.log(Level.INFO, "Unable to delete " + inprogressFile.toPath()); throw new RuntimeException(message, e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); }
log.log(Level.SEVERE, message + e.getMessage(), e);
void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { String message = "Failed writing to file (" + inprogressFile.toPath() + "): "; log.log(Level.SEVERE, message + e.getMessage(), e); boolean successfulDelete = inprogressFile.delete(); if ( ! successfulDelete) log.log(Level.INFO, "Unable to delete " + inprogressFile.toPath()); throw new RuntimeException(message, e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private final long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(Level.SEVERE, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(Level.SEVERE, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new 
RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(Level.FINE, () -> "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(Level.SEVERE, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } finally { try { if (inprogressFile.exists()) { Files.delete(inprogressFile.toPath()); } } catch (IOException e) { log.log(Level.SEVERE, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } } return file; } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private final long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(Level.SEVERE, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(Level.SEVERE, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new 
RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(Level.FINE, () -> "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(Level.SEVERE, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } finally { try { if (inprogressFile.exists()) { Files.delete(inprogressFile.toPath()); } } catch (IOException e) { log.log(Level.SEVERE, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } } return file; } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
This code is executed on bootstrap, so if a big deployment is happening concurrently the above db.lock() will time out. But bootstrap redeployments are also vulnerable to this.
public LoadBalancerProvisioner(NodeRepository nodeRepository, LoadBalancerService service, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.db = nodeRepository.database(); this.service = service; this.provisionControllerLoadBalancer = Flags.CONTROLLER_PROVISION_LB.bindTo(flagSource); for (var id : db.readLoadBalancerIds()) { try (var lock = db.lock(id.application())) { try (var innerLock = db.configLock(id.application())) { var loadBalancer = db.readLoadBalancer(id); loadBalancer.ifPresent(db::writeLoadBalancer); } } } }
try (var innerLock = db.configLock(id.application())) {
public LoadBalancerProvisioner(NodeRepository nodeRepository, LoadBalancerService service, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.db = nodeRepository.database(); this.service = service; this.provisionControllerLoadBalancer = Flags.CONTROLLER_PROVISION_LB.bindTo(flagSource); for (var id : db.readLoadBalancerIds()) { try (var lock = db.lock(id.application())) { try (var innerLock = db.configLock(id.application())) { var loadBalancer = db.readLoadBalancer(id); loadBalancer.ifPresent(db::writeLoadBalancer); } } } }
class LoadBalancerProvisioner { private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName()); private final NodeRepository nodeRepository; private final CuratorDatabaseClient db; private final LoadBalancerService service; private final BooleanFlag provisionControllerLoadBalancer; /** * Prepare a load balancer for given application and cluster. * * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated * nodes. It's state will remain unchanged. * * If no load balancer exists, a new one will be provisioned in {@link LoadBalancer.State * * Calling this for irrelevant node or cluster types is a no-op. */ public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) { if (!canForwardTo(requestedNodes.type(), cluster)) return; if (application.instance().isTester()) return; try (var lock = db.lock(application)) { try (var innerLock = db.configLock(application)) { ClusterSpec.Id clusterId = effectiveId(cluster); List<Node> nodes = nodesOf(clusterId, application); provision(application, clusterId, nodes, false, lock); } } } /** * Activate load balancer for given application and cluster. * * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated * nodes and the load balancer itself will be moved to {@link LoadBalancer.State * * Load balancers for clusters that are no longer in given clusters are deactivated. * * Calling this when no load balancer has been prepared for given cluster is a no-op. 
*/ public void activate(ApplicationId application, Set<ClusterSpec> clusters, @SuppressWarnings("unused") Mutex applicationLock, NestedTransaction transaction) { try (var lock = db.lock(application)) { try (var innerLock = db.configLock(application)) { for (var cluster : loadBalancedClustersOf(application).entrySet()) { provision(application, cluster.getKey(), cluster.getValue(), true, lock); } var surplusLoadBalancers = surplusLoadBalancersOf(application, clusters.stream() .map(LoadBalancerProvisioner::effectiveId) .collect(Collectors.toSet())); deactivate(surplusLoadBalancers, transaction); } } } /** * Deactivate all load balancers assigned to given application. This is a no-op if an application does not have any * load balancer(s). */ public void deactivate(ApplicationId application, NestedTransaction transaction) { try (var lock = nodeRepository.lock(application)) { try (var innerLock = db.configLock(application)) { deactivate(nodeRepository.loadBalancers(application).asList(), transaction); } } } /** Returns load balancers of given application that are no longer referenced by given clusters */ private List<LoadBalancer> surplusLoadBalancersOf(ApplicationId application, Set<ClusterSpec.Id> activeClusters) { var activeLoadBalancersByCluster = nodeRepository.loadBalancers(application) .in(LoadBalancer.State.active) .asList() .stream() .collect(Collectors.toMap(lb -> lb.id().cluster(), Function.identity())); var surplus = new ArrayList<LoadBalancer>(); for (var kv : activeLoadBalancersByCluster.entrySet()) { if (activeClusters.contains(kv.getKey())) continue; surplus.add(kv.getValue()); } return Collections.unmodifiableList(surplus); } private void deactivate(List<LoadBalancer> loadBalancers, NestedTransaction transaction) { var now = nodeRepository.clock().instant(); var deactivatedLoadBalancers = loadBalancers.stream() .map(lb -> lb.with(LoadBalancer.State.inactive, now)) .collect(Collectors.toList()); db.writeLoadBalancers(deactivatedLoadBalancers, 
transaction); } private boolean canForwardTo(NodeType type, ClusterSpec cluster) { boolean canForwardTo = service.canForwardTo(type, cluster.type()); if (canForwardTo) { if (type == NodeType.controller) return provisionControllerLoadBalancer.value(); } return canForwardTo; } /** Idempotently provision a load balancer for given application and cluster */ private void provision(ApplicationId application, ClusterSpec.Id clusterId, List<Node> nodes, boolean activate, @SuppressWarnings("unused") Mutex loadBalancersLock) { var id = new LoadBalancerId(application, clusterId); var now = nodeRepository.clock().instant(); var loadBalancer = db.readLoadBalancer(id); if (loadBalancer.isEmpty() && activate) return; var force = loadBalancer.isPresent() && loadBalancer.get().state() != LoadBalancer.State.active; var instance = provisionInstance(id, nodes, force); LoadBalancer newLoadBalancer; if (loadBalancer.isEmpty()) { newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now); } else { var newState = activate ? LoadBalancer.State.active : loadBalancer.get().state(); newLoadBalancer = loadBalancer.get().with(instance).with(newState, now); if (loadBalancer.get().state() != newLoadBalancer.state()) { log.log(logLevel(), "Moving " + newLoadBalancer.id() + " to state " + newLoadBalancer.state()); } } db.writeLoadBalancer(newLoadBalancer); } private LoadBalancerInstance provisionInstance(LoadBalancerId id, List<Node> nodes, boolean force) { var reals = new LinkedHashSet<Real>(); for (var node : nodes) { for (var ip : reachableIpAddresses(node)) { reals.add(new Real(HostName.from(node.hostname()), ip)); } } log.log(logLevel(), "Creating " + id + ", targeting: " + reals); try { return service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals), force); } catch (Exception e) { throw new LoadBalancerServiceException("Failed to (re)configure " + id + ", targeting: " + reals + ". 
The operation will be retried on next deployment", e); } } /** Returns the nodes allocated to the given load balanced cluster */ private List<Node> nodesOf(ClusterSpec.Id loadBalancedCluster, ApplicationId application) { return loadBalancedClustersOf(application).getOrDefault(loadBalancedCluster, List.of()); } /** Returns the load balanced clusters of given application and their nodes */ private Map<ClusterSpec.Id, List<Node>> loadBalancedClustersOf(ApplicationId application) { NodeList nodes = NodeList.copyOf(nodeRepository.getNodes(Node.State.reserved, Node.State.active)) .owner(application); if (nodes.stream().anyMatch(node -> node.type() == NodeType.config)) { nodes = nodes.nodeType(NodeType.config).type(ClusterSpec.Type.admin); } else if (nodes.stream().anyMatch(node -> node.type() == NodeType.controller)) { nodes = nodes.nodeType(NodeType.controller).container(); } else { nodes = nodes.nodeType(NodeType.tenant).container(); } return nodes.stream().collect(Collectors.groupingBy(node -> effectiveId(node.allocation().get().membership().cluster()))); } /** Find IP addresses reachable by the load balancer service */ private Set<String> reachableIpAddresses(Node node) { Set<String> reachable = new LinkedHashSet<>(node.ipConfig().primary()); switch (service.protocol()) { case ipv4: reachable.removeIf(IP::isV6); break; case ipv6: reachable.removeIf(IP::isV4); break; } return reachable; } private static ClusterSpec.Id effectiveId(ClusterSpec cluster) { return cluster.combinedId().orElse(cluster.id()); } private Level logLevel() { return nodeRepository.zone().system().isCd() ? Level.INFO : Level.FINE; } }
class LoadBalancerProvisioner { private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName()); private final NodeRepository nodeRepository; private final CuratorDatabaseClient db; private final LoadBalancerService service; private final BooleanFlag provisionControllerLoadBalancer; /** * Prepare a load balancer for given application and cluster. * * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated * nodes. It's state will remain unchanged. * * If no load balancer exists, a new one will be provisioned in {@link LoadBalancer.State * * Calling this for irrelevant node or cluster types is a no-op. */ public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) { if (!canForwardTo(requestedNodes.type(), cluster)) return; if (application.instance().isTester()) return; try (var lock = db.lock(application)) { try (var innerLock = db.configLock(application)) { ClusterSpec.Id clusterId = effectiveId(cluster); List<Node> nodes = nodesOf(clusterId, application); provision(application, clusterId, nodes, false, lock); } } } /** * Activate load balancer for given application and cluster. * * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated * nodes and the load balancer itself will be moved to {@link LoadBalancer.State * * Load balancers for clusters that are no longer in given clusters are deactivated. * * Calling this when no load balancer has been prepared for given cluster is a no-op. 
*/ public void activate(ApplicationId application, Set<ClusterSpec> clusters, @SuppressWarnings("unused") Mutex applicationLock, NestedTransaction transaction) { try (var lock = db.lock(application)) { try (var innerLock = db.configLock(application)) { for (var cluster : loadBalancedClustersOf(application).entrySet()) { provision(application, cluster.getKey(), cluster.getValue(), true, lock); } var surplusLoadBalancers = surplusLoadBalancersOf(application, clusters.stream() .map(LoadBalancerProvisioner::effectiveId) .collect(Collectors.toSet())); deactivate(surplusLoadBalancers, transaction); } } } /** * Deactivate all load balancers assigned to given application. This is a no-op if an application does not have any * load balancer(s). */ public void deactivate(ApplicationId application, NestedTransaction transaction) { try (var lock = nodeRepository.lock(application)) { try (var innerLock = db.configLock(application)) { deactivate(nodeRepository.loadBalancers(application).asList(), transaction); } } } /** Returns load balancers of given application that are no longer referenced by given clusters */ private List<LoadBalancer> surplusLoadBalancersOf(ApplicationId application, Set<ClusterSpec.Id> activeClusters) { var activeLoadBalancersByCluster = nodeRepository.loadBalancers(application) .in(LoadBalancer.State.active) .asList() .stream() .collect(Collectors.toMap(lb -> lb.id().cluster(), Function.identity())); var surplus = new ArrayList<LoadBalancer>(); for (var kv : activeLoadBalancersByCluster.entrySet()) { if (activeClusters.contains(kv.getKey())) continue; surplus.add(kv.getValue()); } return Collections.unmodifiableList(surplus); } private void deactivate(List<LoadBalancer> loadBalancers, NestedTransaction transaction) { var now = nodeRepository.clock().instant(); var deactivatedLoadBalancers = loadBalancers.stream() .map(lb -> lb.with(LoadBalancer.State.inactive, now)) .collect(Collectors.toList()); db.writeLoadBalancers(deactivatedLoadBalancers, 
transaction); } private boolean canForwardTo(NodeType type, ClusterSpec cluster) { boolean canForwardTo = service.canForwardTo(type, cluster.type()); if (canForwardTo) { if (type == NodeType.controller) return provisionControllerLoadBalancer.value(); } return canForwardTo; } /** Idempotently provision a load balancer for given application and cluster */ private void provision(ApplicationId application, ClusterSpec.Id clusterId, List<Node> nodes, boolean activate, @SuppressWarnings("unused") Mutex loadBalancersLock) { var id = new LoadBalancerId(application, clusterId); var now = nodeRepository.clock().instant(); var loadBalancer = db.readLoadBalancer(id); if (loadBalancer.isEmpty() && activate) return; var force = loadBalancer.isPresent() && loadBalancer.get().state() != LoadBalancer.State.active; var instance = provisionInstance(id, nodes, force); LoadBalancer newLoadBalancer; if (loadBalancer.isEmpty()) { newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now); } else { var newState = activate ? LoadBalancer.State.active : loadBalancer.get().state(); newLoadBalancer = loadBalancer.get().with(instance).with(newState, now); if (loadBalancer.get().state() != newLoadBalancer.state()) { log.log(logLevel(), "Moving " + newLoadBalancer.id() + " to state " + newLoadBalancer.state()); } } db.writeLoadBalancer(newLoadBalancer); } private LoadBalancerInstance provisionInstance(LoadBalancerId id, List<Node> nodes, boolean force) { var reals = new LinkedHashSet<Real>(); for (var node : nodes) { for (var ip : reachableIpAddresses(node)) { reals.add(new Real(HostName.from(node.hostname()), ip)); } } log.log(logLevel(), "Creating " + id + ", targeting: " + reals); try { return service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals), force); } catch (Exception e) { throw new LoadBalancerServiceException("Failed to (re)configure " + id + ", targeting: " + reals + ". 
The operation will be retried on next deployment", e); } } /** Returns the nodes allocated to the given load balanced cluster */ private List<Node> nodesOf(ClusterSpec.Id loadBalancedCluster, ApplicationId application) { return loadBalancedClustersOf(application).getOrDefault(loadBalancedCluster, List.of()); } /** Returns the load balanced clusters of given application and their nodes */ private Map<ClusterSpec.Id, List<Node>> loadBalancedClustersOf(ApplicationId application) { NodeList nodes = NodeList.copyOf(nodeRepository.getNodes(Node.State.reserved, Node.State.active)) .owner(application); if (nodes.stream().anyMatch(node -> node.type() == NodeType.config)) { nodes = nodes.nodeType(NodeType.config).type(ClusterSpec.Type.admin); } else if (nodes.stream().anyMatch(node -> node.type() == NodeType.controller)) { nodes = nodes.nodeType(NodeType.controller).container(); } else { nodes = nodes.nodeType(NodeType.tenant).container(); } return nodes.stream().collect(Collectors.groupingBy(node -> effectiveId(node.allocation().get().membership().cluster()))); } /** Find IP addresses reachable by the load balancer service */ private Set<String> reachableIpAddresses(Node node) { Set<String> reachable = new LinkedHashSet<>(node.ipConfig().primary()); switch (service.protocol()) { case ipv4: reachable.removeIf(IP::isV6); break; case ipv6: reachable.removeIf(IP::isV4); break; } return reachable; } private static ClusterSpec.Id effectiveId(ClusterSpec cluster) { return cluster.combinedId().orElse(cluster.id()); } private Level logLevel() { return nodeRepository.zone().system().isCd() ? Level.INFO : Level.FINE; } }
Yes. This is also why I left out locking in the `NodeRepository` constructor even though it's wrong. If a lock timeout happens at construction time it should recover though as the container will restart and try to construct the components again, and by then deployments are hopefully done.
public LoadBalancerProvisioner(NodeRepository nodeRepository, LoadBalancerService service, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.db = nodeRepository.database(); this.service = service; this.provisionControllerLoadBalancer = Flags.CONTROLLER_PROVISION_LB.bindTo(flagSource); for (var id : db.readLoadBalancerIds()) { try (var lock = db.lock(id.application())) { try (var innerLock = db.configLock(id.application())) { var loadBalancer = db.readLoadBalancer(id); loadBalancer.ifPresent(db::writeLoadBalancer); } } } }
try (var innerLock = db.configLock(id.application())) {
public LoadBalancerProvisioner(NodeRepository nodeRepository, LoadBalancerService service, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.db = nodeRepository.database(); this.service = service; this.provisionControllerLoadBalancer = Flags.CONTROLLER_PROVISION_LB.bindTo(flagSource); for (var id : db.readLoadBalancerIds()) { try (var lock = db.lock(id.application())) { try (var innerLock = db.configLock(id.application())) { var loadBalancer = db.readLoadBalancer(id); loadBalancer.ifPresent(db::writeLoadBalancer); } } } }
class LoadBalancerProvisioner { private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName()); private final NodeRepository nodeRepository; private final CuratorDatabaseClient db; private final LoadBalancerService service; private final BooleanFlag provisionControllerLoadBalancer; /** * Prepare a load balancer for given application and cluster. * * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated * nodes. It's state will remain unchanged. * * If no load balancer exists, a new one will be provisioned in {@link LoadBalancer.State * * Calling this for irrelevant node or cluster types is a no-op. */ public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) { if (!canForwardTo(requestedNodes.type(), cluster)) return; if (application.instance().isTester()) return; try (var lock = db.lock(application)) { try (var innerLock = db.configLock(application)) { ClusterSpec.Id clusterId = effectiveId(cluster); List<Node> nodes = nodesOf(clusterId, application); provision(application, clusterId, nodes, false, lock); } } } /** * Activate load balancer for given application and cluster. * * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated * nodes and the load balancer itself will be moved to {@link LoadBalancer.State * * Load balancers for clusters that are no longer in given clusters are deactivated. * * Calling this when no load balancer has been prepared for given cluster is a no-op. 
*/ public void activate(ApplicationId application, Set<ClusterSpec> clusters, @SuppressWarnings("unused") Mutex applicationLock, NestedTransaction transaction) { try (var lock = db.lock(application)) { try (var innerLock = db.configLock(application)) { for (var cluster : loadBalancedClustersOf(application).entrySet()) { provision(application, cluster.getKey(), cluster.getValue(), true, lock); } var surplusLoadBalancers = surplusLoadBalancersOf(application, clusters.stream() .map(LoadBalancerProvisioner::effectiveId) .collect(Collectors.toSet())); deactivate(surplusLoadBalancers, transaction); } } } /** * Deactivate all load balancers assigned to given application. This is a no-op if an application does not have any * load balancer(s). */ public void deactivate(ApplicationId application, NestedTransaction transaction) { try (var lock = nodeRepository.lock(application)) { try (var innerLock = db.configLock(application)) { deactivate(nodeRepository.loadBalancers(application).asList(), transaction); } } } /** Returns load balancers of given application that are no longer referenced by given clusters */ private List<LoadBalancer> surplusLoadBalancersOf(ApplicationId application, Set<ClusterSpec.Id> activeClusters) { var activeLoadBalancersByCluster = nodeRepository.loadBalancers(application) .in(LoadBalancer.State.active) .asList() .stream() .collect(Collectors.toMap(lb -> lb.id().cluster(), Function.identity())); var surplus = new ArrayList<LoadBalancer>(); for (var kv : activeLoadBalancersByCluster.entrySet()) { if (activeClusters.contains(kv.getKey())) continue; surplus.add(kv.getValue()); } return Collections.unmodifiableList(surplus); } private void deactivate(List<LoadBalancer> loadBalancers, NestedTransaction transaction) { var now = nodeRepository.clock().instant(); var deactivatedLoadBalancers = loadBalancers.stream() .map(lb -> lb.with(LoadBalancer.State.inactive, now)) .collect(Collectors.toList()); db.writeLoadBalancers(deactivatedLoadBalancers, 
transaction); } private boolean canForwardTo(NodeType type, ClusterSpec cluster) { boolean canForwardTo = service.canForwardTo(type, cluster.type()); if (canForwardTo) { if (type == NodeType.controller) return provisionControllerLoadBalancer.value(); } return canForwardTo; } /** Idempotently provision a load balancer for given application and cluster */ private void provision(ApplicationId application, ClusterSpec.Id clusterId, List<Node> nodes, boolean activate, @SuppressWarnings("unused") Mutex loadBalancersLock) { var id = new LoadBalancerId(application, clusterId); var now = nodeRepository.clock().instant(); var loadBalancer = db.readLoadBalancer(id); if (loadBalancer.isEmpty() && activate) return; var force = loadBalancer.isPresent() && loadBalancer.get().state() != LoadBalancer.State.active; var instance = provisionInstance(id, nodes, force); LoadBalancer newLoadBalancer; if (loadBalancer.isEmpty()) { newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now); } else { var newState = activate ? LoadBalancer.State.active : loadBalancer.get().state(); newLoadBalancer = loadBalancer.get().with(instance).with(newState, now); if (loadBalancer.get().state() != newLoadBalancer.state()) { log.log(logLevel(), "Moving " + newLoadBalancer.id() + " to state " + newLoadBalancer.state()); } } db.writeLoadBalancer(newLoadBalancer); } private LoadBalancerInstance provisionInstance(LoadBalancerId id, List<Node> nodes, boolean force) { var reals = new LinkedHashSet<Real>(); for (var node : nodes) { for (var ip : reachableIpAddresses(node)) { reals.add(new Real(HostName.from(node.hostname()), ip)); } } log.log(logLevel(), "Creating " + id + ", targeting: " + reals); try { return service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals), force); } catch (Exception e) { throw new LoadBalancerServiceException("Failed to (re)configure " + id + ", targeting: " + reals + ". 
The operation will be retried on next deployment", e); } } /** Returns the nodes allocated to the given load balanced cluster */ private List<Node> nodesOf(ClusterSpec.Id loadBalancedCluster, ApplicationId application) { return loadBalancedClustersOf(application).getOrDefault(loadBalancedCluster, List.of()); } /** Returns the load balanced clusters of given application and their nodes */ private Map<ClusterSpec.Id, List<Node>> loadBalancedClustersOf(ApplicationId application) { NodeList nodes = NodeList.copyOf(nodeRepository.getNodes(Node.State.reserved, Node.State.active)) .owner(application); if (nodes.stream().anyMatch(node -> node.type() == NodeType.config)) { nodes = nodes.nodeType(NodeType.config).type(ClusterSpec.Type.admin); } else if (nodes.stream().anyMatch(node -> node.type() == NodeType.controller)) { nodes = nodes.nodeType(NodeType.controller).container(); } else { nodes = nodes.nodeType(NodeType.tenant).container(); } return nodes.stream().collect(Collectors.groupingBy(node -> effectiveId(node.allocation().get().membership().cluster()))); } /** Find IP addresses reachable by the load balancer service */ private Set<String> reachableIpAddresses(Node node) { Set<String> reachable = new LinkedHashSet<>(node.ipConfig().primary()); switch (service.protocol()) { case ipv4: reachable.removeIf(IP::isV6); break; case ipv6: reachable.removeIf(IP::isV4); break; } return reachable; } private static ClusterSpec.Id effectiveId(ClusterSpec cluster) { return cluster.combinedId().orElse(cluster.id()); } private Level logLevel() { return nodeRepository.zone().system().isCd() ? Level.INFO : Level.FINE; } }
class LoadBalancerProvisioner { private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName()); private final NodeRepository nodeRepository; private final CuratorDatabaseClient db; private final LoadBalancerService service; private final BooleanFlag provisionControllerLoadBalancer; /** * Prepare a load balancer for given application and cluster. * * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated * nodes. It's state will remain unchanged. * * If no load balancer exists, a new one will be provisioned in {@link LoadBalancer.State * * Calling this for irrelevant node or cluster types is a no-op. */ public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) { if (!canForwardTo(requestedNodes.type(), cluster)) return; if (application.instance().isTester()) return; try (var lock = db.lock(application)) { try (var innerLock = db.configLock(application)) { ClusterSpec.Id clusterId = effectiveId(cluster); List<Node> nodes = nodesOf(clusterId, application); provision(application, clusterId, nodes, false, lock); } } } /** * Activate load balancer for given application and cluster. * * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated * nodes and the load balancer itself will be moved to {@link LoadBalancer.State * * Load balancers for clusters that are no longer in given clusters are deactivated. * * Calling this when no load balancer has been prepared for given cluster is a no-op. 
*/ public void activate(ApplicationId application, Set<ClusterSpec> clusters, @SuppressWarnings("unused") Mutex applicationLock, NestedTransaction transaction) { try (var lock = db.lock(application)) { try (var innerLock = db.configLock(application)) { for (var cluster : loadBalancedClustersOf(application).entrySet()) { provision(application, cluster.getKey(), cluster.getValue(), true, lock); } var surplusLoadBalancers = surplusLoadBalancersOf(application, clusters.stream() .map(LoadBalancerProvisioner::effectiveId) .collect(Collectors.toSet())); deactivate(surplusLoadBalancers, transaction); } } } /** * Deactivate all load balancers assigned to given application. This is a no-op if an application does not have any * load balancer(s). */ public void deactivate(ApplicationId application, NestedTransaction transaction) { try (var lock = nodeRepository.lock(application)) { try (var innerLock = db.configLock(application)) { deactivate(nodeRepository.loadBalancers(application).asList(), transaction); } } } /** Returns load balancers of given application that are no longer referenced by given clusters */ private List<LoadBalancer> surplusLoadBalancersOf(ApplicationId application, Set<ClusterSpec.Id> activeClusters) { var activeLoadBalancersByCluster = nodeRepository.loadBalancers(application) .in(LoadBalancer.State.active) .asList() .stream() .collect(Collectors.toMap(lb -> lb.id().cluster(), Function.identity())); var surplus = new ArrayList<LoadBalancer>(); for (var kv : activeLoadBalancersByCluster.entrySet()) { if (activeClusters.contains(kv.getKey())) continue; surplus.add(kv.getValue()); } return Collections.unmodifiableList(surplus); } private void deactivate(List<LoadBalancer> loadBalancers, NestedTransaction transaction) { var now = nodeRepository.clock().instant(); var deactivatedLoadBalancers = loadBalancers.stream() .map(lb -> lb.with(LoadBalancer.State.inactive, now)) .collect(Collectors.toList()); db.writeLoadBalancers(deactivatedLoadBalancers, 
transaction); } private boolean canForwardTo(NodeType type, ClusterSpec cluster) { boolean canForwardTo = service.canForwardTo(type, cluster.type()); if (canForwardTo) { if (type == NodeType.controller) return provisionControllerLoadBalancer.value(); } return canForwardTo; } /** Idempotently provision a load balancer for given application and cluster */ private void provision(ApplicationId application, ClusterSpec.Id clusterId, List<Node> nodes, boolean activate, @SuppressWarnings("unused") Mutex loadBalancersLock) { var id = new LoadBalancerId(application, clusterId); var now = nodeRepository.clock().instant(); var loadBalancer = db.readLoadBalancer(id); if (loadBalancer.isEmpty() && activate) return; var force = loadBalancer.isPresent() && loadBalancer.get().state() != LoadBalancer.State.active; var instance = provisionInstance(id, nodes, force); LoadBalancer newLoadBalancer; if (loadBalancer.isEmpty()) { newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now); } else { var newState = activate ? LoadBalancer.State.active : loadBalancer.get().state(); newLoadBalancer = loadBalancer.get().with(instance).with(newState, now); if (loadBalancer.get().state() != newLoadBalancer.state()) { log.log(logLevel(), "Moving " + newLoadBalancer.id() + " to state " + newLoadBalancer.state()); } } db.writeLoadBalancer(newLoadBalancer); } private LoadBalancerInstance provisionInstance(LoadBalancerId id, List<Node> nodes, boolean force) { var reals = new LinkedHashSet<Real>(); for (var node : nodes) { for (var ip : reachableIpAddresses(node)) { reals.add(new Real(HostName.from(node.hostname()), ip)); } } log.log(logLevel(), "Creating " + id + ", targeting: " + reals); try { return service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals), force); } catch (Exception e) { throw new LoadBalancerServiceException("Failed to (re)configure " + id + ", targeting: " + reals + ". 
The operation will be retried on next deployment", e); } } /** Returns the nodes allocated to the given load balanced cluster */ private List<Node> nodesOf(ClusterSpec.Id loadBalancedCluster, ApplicationId application) { return loadBalancedClustersOf(application).getOrDefault(loadBalancedCluster, List.of()); } /** Returns the load balanced clusters of given application and their nodes */ private Map<ClusterSpec.Id, List<Node>> loadBalancedClustersOf(ApplicationId application) { NodeList nodes = NodeList.copyOf(nodeRepository.getNodes(Node.State.reserved, Node.State.active)) .owner(application); if (nodes.stream().anyMatch(node -> node.type() == NodeType.config)) { nodes = nodes.nodeType(NodeType.config).type(ClusterSpec.Type.admin); } else if (nodes.stream().anyMatch(node -> node.type() == NodeType.controller)) { nodes = nodes.nodeType(NodeType.controller).container(); } else { nodes = nodes.nodeType(NodeType.tenant).container(); } return nodes.stream().collect(Collectors.groupingBy(node -> effectiveId(node.allocation().get().membership().cluster()))); } /** Find IP addresses reachable by the load balancer service */ private Set<String> reachableIpAddresses(Node node) { Set<String> reachable = new LinkedHashSet<>(node.ipConfig().primary()); switch (service.protocol()) { case ipv4: reachable.removeIf(IP::isV6); break; case ipv6: reachable.removeIf(IP::isV4); break; } return reachable; } private static ClusterSpec.Id effectiveId(ClusterSpec cluster) { return cluster.combinedId().orElse(cluster.id()); } private Level logLevel() { return nodeRepository.zone().system().isCd() ? Level.INFO : Level.FINE; } }
Instantiating the ApplicationController for unit tests seemed like more trouble than it was worth. Maybe I should have mocked it instead?
private boolean hasNoDeployments(ApplicationId applicationId) { var deployments = curator.readApplication(TenantAndApplicationId.from(applicationId)) .flatMap(app -> app.get(applicationId.instance())) .map(Instance::deployments); return deployments.isEmpty() || deployments.get().size() == 0; }
var deployments = curator.readApplication(TenantAndApplicationId.from(applicationId))
private boolean hasNoDeployments(ApplicationId applicationId) { var deployments = curator.readApplication(TenantAndApplicationId.from(applicationId)) .flatMap(app -> app.get(applicationId.instance())) .map(Instance::deployments); return deployments.isEmpty() || deployments.get().size() == 0; }
class EndpointCertificateManager { private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName()); private final ZoneRegistry zoneRegistry; private final CuratorDb curator; private final SecretStore secretStore; private final EndpointCertificateProvider endpointCertificateProvider; private final Clock clock; private final BooleanFlag validateEndpointCertificates; private final StringFlag deleteUnusedEndpointCertificates; private final BooleanFlag endpointCertInSharedRouting; public EndpointCertificateManager(ZoneRegistry zoneRegistry, CuratorDb curator, SecretStore secretStore, EndpointCertificateProvider endpointCertificateProvider, Clock clock, FlagSource flagSource) { this.zoneRegistry = zoneRegistry; this.curator = curator; this.secretStore = secretStore; this.endpointCertificateProvider = endpointCertificateProvider; this.clock = clock; this.validateEndpointCertificates = Flags.VALIDATE_ENDPOINT_CERTIFICATES.bindTo(flagSource); this.deleteUnusedEndpointCertificates = Flags.DELETE_UNUSED_ENDPOINT_CERTIFICATES.bindTo(flagSource); this.endpointCertInSharedRouting = Flags.ENDPOINT_CERT_IN_SHARED_ROUTING.bindTo(flagSource); Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> { try { this.deleteUnusedCertificates(); } catch (Throwable t) { log.log(Level.INFO, "Unexpected Throwable caught while deleting unused endpoint certificates", t); } }, 1, 10, TimeUnit.MINUTES); } public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone, Optional<DeploymentInstanceSpec> instanceSpec) { var t0 = Instant.now(); Optional<EndpointCertificateMetadata> metadata = getOrProvision(instance, zone, instanceSpec); metadata.ifPresent(m -> curator.writeEndpointCertificateMetadata(instance.id(), m.withLastRequested(clock.instant().getEpochSecond()))); Duration duration = Duration.between(t0, Instant.now()); if (duration.toSeconds() > 30) log.log(Level.INFO, String.format("Getting endpoint 
certificate metadata for %s took %d seconds!", instance.id().serializedForm(), duration.toSeconds())); return metadata; } @NotNull private Optional<EndpointCertificateMetadata> getOrProvision(Instance instance, ZoneId zone, Optional<DeploymentInstanceSpec> instanceSpec) { boolean endpointCertInSharedRouting = this.endpointCertInSharedRouting.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value(); if (!zoneRegistry.zones().directlyRouted().ids().contains(zone) && !endpointCertInSharedRouting) return Optional.empty(); final var currentCertificateMetadata = curator.readEndpointCertificateMetadata(instance.id()); if (currentCertificateMetadata.isEmpty()) { var provisionedCertificateMetadata = provisionEndpointCertificate(instance, Optional.empty(), zone, instanceSpec); curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata); return Optional.of(provisionedCertificateMetadata); } var sansInCertificate = currentCertificateMetadata.get().requestedDnsSans(); var requiredSansForZone = dnsNamesOf(instance.id(), zone); if (sansInCertificate.isPresent() && !sansInCertificate.get().containsAll(requiredSansForZone)) { var reprovisionedCertificateMetadata = provisionEndpointCertificate(instance, currentCertificateMetadata, zone, instanceSpec); curator.writeEndpointCertificateMetadata(instance.id(), reprovisionedCertificateMetadata); validateEndpointCertificate(reprovisionedCertificateMetadata, instance, zone); return Optional.of(reprovisionedCertificateMetadata); } var latestAvailableVersion = latestVersionInSecretStore(currentCertificateMetadata.get()); if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > currentCertificateMetadata.get().version()) { var refreshedCertificateMetadata = currentCertificateMetadata.get().withVersion(latestAvailableVersion.getAsInt()); validateEndpointCertificate(refreshedCertificateMetadata, instance, zone); curator.writeEndpointCertificateMetadata(instance.id(), 
refreshedCertificateMetadata); return Optional.of(refreshedCertificateMetadata); } validateEndpointCertificate(currentCertificateMetadata.get(), instance, zone); return currentCertificateMetadata; } enum CleanupMode { DISABLE, DRYRUN, ENABLE } private void deleteUnusedCertificates() { CleanupMode mode = CleanupMode.valueOf(deleteUnusedEndpointCertificates.value()); if (mode == CleanupMode.DISABLE) return; var oneMonthAgo = clock.instant().minus(1, ChronoUnit.MONTHS); curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> { var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested()); if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) { log.log(LogLevel.INFO, "Cert for app " + applicationId.serializedForm() + " has not been requested in a month and app has no deployments" + (mode == CleanupMode.ENABLE ? ", deleting from provider and ZK" : "")); if (mode == CleanupMode.ENABLE) { endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData); curator.deleteEndpointCertificateMetadata(applicationId); } } }); } private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) { try { var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName())); var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName())); return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max(); } catch (SecretNotFoundException s) { return OptionalInt.empty(); } } private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance, Optional<EndpointCertificateMetadata> currentMetadata, ZoneId deploymentZone, Optional<DeploymentInstanceSpec> instanceSpec) { List<String> currentlyPresentNames = currentMetadata.isPresent() ? 
currentMetadata.get().requestedDnsSans().orElseThrow(() -> new RuntimeException("Certificate metadata exists but SANs are not present!")) : Collections.emptyList(); var requiredZones = new LinkedHashSet<>(Set.of(deploymentZone)); var zoneCandidateList = zoneRegistry.zones().controllerUpgraded().zones().stream().map(ZoneApi::getId).collect(Collectors.toList()); if (!deploymentZone.environment().isManuallyDeployed()) { zoneCandidateList.stream() .filter(z -> z.environment().isTest() || instanceSpec.isPresent() && instanceSpec.get().deploysTo(z.environment(), z.region())) .forEach(requiredZones::add); } var requiredNames = requiredZones.stream() .flatMap(zone -> dnsNamesOf(instance.id(), zone).stream()) .collect(Collectors.toCollection(LinkedHashSet::new)); zoneCandidateList.stream() .map(zone -> dnsNamesOf(instance.id(), zone)) .filter(zoneNames -> zoneNames.stream().anyMatch(currentlyPresentNames::contains)) .filter(currentlyPresentNames::containsAll) .forEach(requiredNames::addAll); if (!requiredNames.containsAll(currentlyPresentNames)) throw new RuntimeException("SANs to be requested do not cover all existing names! 
Missing names: " + currentlyPresentNames.stream().filter(s -> !requiredNames.contains(s)).collect(Collectors.joining(", "))); return endpointCertificateProvider.requestCaSignedCertificate(instance.id(), List.copyOf(requiredNames), currentMetadata); } private void validateEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone) { if (validateEndpointCertificates.value()) try { var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version()); if (pemEncodedEndpointCertificate == null) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Secret store returned null for certificate"); List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate); if (x509CertificateList.isEmpty()) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Empty certificate list"); if (x509CertificateList.size() < 2) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Only a single certificate found in chain - intermediate certificates likely missing"); Instant now = clock.instant(); Instant firstExpiry = Instant.MAX; for (X509Certificate x509Certificate : x509CertificateList) { Instant notBefore = x509Certificate.getNotBefore().toInstant(); Instant notAfter = x509Certificate.getNotAfter().toInstant(); if (now.isBefore(notBefore)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate is not yet valid"); if (now.isAfter(notAfter)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate has expired"); if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter; } X509Certificate endEntityCertificate = x509CertificateList.get(0); Set<String> subjectAlternativeNames = 
X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream() .filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME)) .map(SubjectAlternativeName::getValue).collect(Collectors.toSet()); var dnsNamesOfZone = dnsNamesOf(instance.id(), zone); if (!subjectAlternativeNames.containsAll(dnsNamesOfZone)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate is missing required SANs for zone " + zone.value()); } catch (SecretNotFoundException s) { throw new EndpointCertificateException(EndpointCertificateException.Type.CERT_NOT_AVAILABLE, "Certificate not found in secret store"); } catch (EndpointCertificateException e) { log.log(Level.WARNING, "Certificate validation failure for " + instance.id().serializedForm(), e); throw e; } catch (Exception e) { log.log(Level.WARNING, "Certificate validation failure for " + instance.id().serializedForm(), e); throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate validation failure for app " + instance.id().serializedForm(), e); } } private List<String> dnsNamesOf(ApplicationId applicationId, ZoneId zone) { List<String> endpointDnsNames = new ArrayList<>(); endpointDnsNames.add(commonNameHashOf(applicationId, zoneRegistry.system())); List<Endpoint.EndpointBuilder> endpoints = new ArrayList<>(); if (zone.environment().isProduction()) { endpoints.add(Endpoint.of(applicationId).target(EndpointId.defaultId())); endpoints.add(Endpoint.of(applicationId).wildcard()); } endpoints.add(Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone)); endpoints.add(Endpoint.of(applicationId).wildcard(zone)); endpoints.stream() .map(endpoint -> endpoint.routingMethod(RoutingMethod.exclusive)) .map(endpoint -> endpoint.on(Endpoint.Port.tls())) .map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system())) .map(Endpoint::dnsName).forEach(endpointDnsNames::add); return 
Collections.unmodifiableList(endpointDnsNames); } /** Create a common name based on a hash of the ApplicationId. This should always be less than 64 characters long. */ @SuppressWarnings("UnstableApiUsage") private static String commonNameHashOf(ApplicationId application, SystemName system) { var hashCode = Hashing.sha1().hashString(application.serializedForm(), Charset.defaultCharset()); var base32encoded = BaseEncoding.base32().omitPadding().lowerCase().encode(hashCode.asBytes()); return 'v' + base32encoded + Endpoint.dnsSuffix(system); } }
class EndpointCertificateManager { private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName()); private final ZoneRegistry zoneRegistry; private final CuratorDb curator; private final SecretStore secretStore; private final EndpointCertificateProvider endpointCertificateProvider; private final Clock clock; private final BooleanFlag validateEndpointCertificates; private final StringFlag deleteUnusedEndpointCertificates; private final BooleanFlag endpointCertInSharedRouting; public EndpointCertificateManager(ZoneRegistry zoneRegistry, CuratorDb curator, SecretStore secretStore, EndpointCertificateProvider endpointCertificateProvider, Clock clock, FlagSource flagSource) { this.zoneRegistry = zoneRegistry; this.curator = curator; this.secretStore = secretStore; this.endpointCertificateProvider = endpointCertificateProvider; this.clock = clock; this.validateEndpointCertificates = Flags.VALIDATE_ENDPOINT_CERTIFICATES.bindTo(flagSource); this.deleteUnusedEndpointCertificates = Flags.DELETE_UNUSED_ENDPOINT_CERTIFICATES.bindTo(flagSource); this.endpointCertInSharedRouting = Flags.ENDPOINT_CERT_IN_SHARED_ROUTING.bindTo(flagSource); Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> { try { this.deleteUnusedCertificates(); } catch (Throwable t) { log.log(Level.INFO, "Unexpected Throwable caught while deleting unused endpoint certificates", t); } }, 1, 10, TimeUnit.MINUTES); } public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone, Optional<DeploymentInstanceSpec> instanceSpec) { var t0 = Instant.now(); Optional<EndpointCertificateMetadata> metadata = getOrProvision(instance, zone, instanceSpec); metadata.ifPresent(m -> curator.writeEndpointCertificateMetadata(instance.id(), m.withLastRequested(clock.instant().getEpochSecond()))); Duration duration = Duration.between(t0, Instant.now()); if (duration.toSeconds() > 30) log.log(Level.INFO, String.format("Getting endpoint 
certificate metadata for %s took %d seconds!", instance.id().serializedForm(), duration.toSeconds())); return metadata; } @NotNull private Optional<EndpointCertificateMetadata> getOrProvision(Instance instance, ZoneId zone, Optional<DeploymentInstanceSpec> instanceSpec) { boolean endpointCertInSharedRouting = this.endpointCertInSharedRouting.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value(); if (!zoneRegistry.zones().directlyRouted().ids().contains(zone) && !endpointCertInSharedRouting) return Optional.empty(); final var currentCertificateMetadata = curator.readEndpointCertificateMetadata(instance.id()); if (currentCertificateMetadata.isEmpty()) { var provisionedCertificateMetadata = provisionEndpointCertificate(instance, Optional.empty(), zone, instanceSpec); curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata); return Optional.of(provisionedCertificateMetadata); } var sansInCertificate = currentCertificateMetadata.get().requestedDnsSans(); var requiredSansForZone = dnsNamesOf(instance.id(), zone); if (sansInCertificate.isPresent() && !sansInCertificate.get().containsAll(requiredSansForZone)) { var reprovisionedCertificateMetadata = provisionEndpointCertificate(instance, currentCertificateMetadata, zone, instanceSpec); curator.writeEndpointCertificateMetadata(instance.id(), reprovisionedCertificateMetadata); validateEndpointCertificate(reprovisionedCertificateMetadata, instance, zone); return Optional.of(reprovisionedCertificateMetadata); } var latestAvailableVersion = latestVersionInSecretStore(currentCertificateMetadata.get()); if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > currentCertificateMetadata.get().version()) { var refreshedCertificateMetadata = currentCertificateMetadata.get().withVersion(latestAvailableVersion.getAsInt()); validateEndpointCertificate(refreshedCertificateMetadata, instance, zone); curator.writeEndpointCertificateMetadata(instance.id(), 
refreshedCertificateMetadata); return Optional.of(refreshedCertificateMetadata); } validateEndpointCertificate(currentCertificateMetadata.get(), instance, zone); return currentCertificateMetadata; } enum CleanupMode { DISABLE, DRYRUN, ENABLE } private void deleteUnusedCertificates() { CleanupMode mode = CleanupMode.valueOf(deleteUnusedEndpointCertificates.value()); if (mode == CleanupMode.DISABLE) return; var oneMonthAgo = clock.instant().minus(1, ChronoUnit.MONTHS); curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> { var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested()); if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) { log.log(LogLevel.INFO, "Cert for app " + applicationId.serializedForm() + " has not been requested in a month and app has no deployments" + (mode == CleanupMode.ENABLE ? ", deleting from provider and ZK" : "")); if (mode == CleanupMode.ENABLE) { endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData); curator.deleteEndpointCertificateMetadata(applicationId); } } }); } private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) { try { var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName())); var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName())); return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max(); } catch (SecretNotFoundException s) { return OptionalInt.empty(); } } private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance, Optional<EndpointCertificateMetadata> currentMetadata, ZoneId deploymentZone, Optional<DeploymentInstanceSpec> instanceSpec) { List<String> currentlyPresentNames = currentMetadata.isPresent() ? 
currentMetadata.get().requestedDnsSans().orElseThrow(() -> new RuntimeException("Certificate metadata exists but SANs are not present!")) : Collections.emptyList(); var requiredZones = new LinkedHashSet<>(Set.of(deploymentZone)); var zoneCandidateList = zoneRegistry.zones().controllerUpgraded().zones().stream().map(ZoneApi::getId).collect(Collectors.toList()); if (!deploymentZone.environment().isManuallyDeployed()) { zoneCandidateList.stream() .filter(z -> z.environment().isTest() || instanceSpec.isPresent() && instanceSpec.get().deploysTo(z.environment(), z.region())) .forEach(requiredZones::add); } var requiredNames = requiredZones.stream() .flatMap(zone -> dnsNamesOf(instance.id(), zone).stream()) .collect(Collectors.toCollection(LinkedHashSet::new)); zoneCandidateList.stream() .map(zone -> dnsNamesOf(instance.id(), zone)) .filter(zoneNames -> zoneNames.stream().anyMatch(currentlyPresentNames::contains)) .filter(currentlyPresentNames::containsAll) .forEach(requiredNames::addAll); if (!requiredNames.containsAll(currentlyPresentNames)) throw new RuntimeException("SANs to be requested do not cover all existing names! 
Missing names: " + currentlyPresentNames.stream().filter(s -> !requiredNames.contains(s)).collect(Collectors.joining(", "))); return endpointCertificateProvider.requestCaSignedCertificate(instance.id(), List.copyOf(requiredNames), currentMetadata); } private void validateEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone) { if (validateEndpointCertificates.value()) try { var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version()); if (pemEncodedEndpointCertificate == null) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Secret store returned null for certificate"); List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate); if (x509CertificateList.isEmpty()) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Empty certificate list"); if (x509CertificateList.size() < 2) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Only a single certificate found in chain - intermediate certificates likely missing"); Instant now = clock.instant(); Instant firstExpiry = Instant.MAX; for (X509Certificate x509Certificate : x509CertificateList) { Instant notBefore = x509Certificate.getNotBefore().toInstant(); Instant notAfter = x509Certificate.getNotAfter().toInstant(); if (now.isBefore(notBefore)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate is not yet valid"); if (now.isAfter(notAfter)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate has expired"); if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter; } X509Certificate endEntityCertificate = x509CertificateList.get(0); Set<String> subjectAlternativeNames = 
X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream() .filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME)) .map(SubjectAlternativeName::getValue).collect(Collectors.toSet()); var dnsNamesOfZone = dnsNamesOf(instance.id(), zone); if (!subjectAlternativeNames.containsAll(dnsNamesOfZone)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate is missing required SANs for zone " + zone.value()); } catch (SecretNotFoundException s) { throw new EndpointCertificateException(EndpointCertificateException.Type.CERT_NOT_AVAILABLE, "Certificate not found in secret store"); } catch (EndpointCertificateException e) { log.log(Level.WARNING, "Certificate validation failure for " + instance.id().serializedForm(), e); throw e; } catch (Exception e) { log.log(Level.WARNING, "Certificate validation failure for " + instance.id().serializedForm(), e); throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate validation failure for app " + instance.id().serializedForm(), e); } } private List<String> dnsNamesOf(ApplicationId applicationId, ZoneId zone) { List<String> endpointDnsNames = new ArrayList<>(); endpointDnsNames.add(commonNameHashOf(applicationId, zoneRegistry.system())); List<Endpoint.EndpointBuilder> endpoints = new ArrayList<>(); if (zone.environment().isProduction()) { endpoints.add(Endpoint.of(applicationId).target(EndpointId.defaultId())); endpoints.add(Endpoint.of(applicationId).wildcard()); } endpoints.add(Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone)); endpoints.add(Endpoint.of(applicationId).wildcard(zone)); endpoints.stream() .map(endpoint -> endpoint.routingMethod(RoutingMethod.exclusive)) .map(endpoint -> endpoint.on(Endpoint.Port.tls())) .map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system())) .map(Endpoint::dnsName).forEach(endpointDnsNames::add); return 
Collections.unmodifiableList(endpointDnsNames); } /** Create a common name based on a hash of the ApplicationId. This should always be less than 64 characters long. */ @SuppressWarnings("UnstableApiUsage") private static String commonNameHashOf(ApplicationId application, SystemName system) { var hashCode = Hashing.sha1().hashString(application.serializedForm(), Charset.defaultCharset()); var base32encoded = BaseEncoding.base32().omitPadding().lowerCase().encode(hashCode.asBytes()); return 'v' + base32encoded + Endpoint.dnsSuffix(system); } }
I get your point.
/**
 * Appends window (analytic) operators to the plan under construction.
 * <p>
 * First ensures every partition/order-by expression of each analytic call is available as a
 * column reference — adding a LogicalProjectOperator when any of them is a computed
 * expression — then standardizes and groups the analytic calls into LogicalWindowOperators
 * and stacks those on top of the plan. Returns {@code subOpt} unchanged when {@code window}
 * is empty.
 */
private OptExprBuilder window(OptExprBuilder subOpt, List<AnalyticExpr> window) {
    if (window.isEmpty()) {
        return subOpt;
    }

    // Collect every partition expression and order-by expression used by any window function.
    List<Expr> projectExpressions = new ArrayList<>();
    for (AnalyticExpr analyticExpr : window) {
        projectExpressions.addAll(analyticExpr.getPartitionExprs());
        projectExpressions.addAll(analyticExpr.getOrderByElements()
                .stream().map(OrderByElement::getExpr).collect(Collectors.toList()));
    }

    // Probe whether all of those expressions already translate to plain column references.
    // tempMapping buffers the results so the shared mapping is only mutated on full success.
    final ExpressionMapping expressionMapping = subOpt.getExpressionMapping();
    boolean allColumnRef = true;
    Map<Expr, ColumnRefOperator> tempMapping = new HashMap<>();
    for (Expr expression : projectExpressions) {
        ScalarOperator operator = SqlToScalarOperatorTranslator.translate(expression, expressionMapping, columnRefFactory);
        if (!operator.isColumnRef()) {
            allColumnRef = false;
            tempMapping.clear();
            break;
        } else {
            tempMapping.put(expression, (ColumnRefOperator) operator);
        }
    }
    if (allColumnRef) {
        expressionMapping.getExpressionToColumns().putAll(tempMapping);
    }

    // When some partition/order-by expression is computed (not a bare column), insert a
    // LogicalProjectOperator that materializes it; otherwise no projection is needed.
    if (!allColumnRef) {
        ExpressionMapping outputTranslations = new ExpressionMapping(subOpt.getScope());
        List<ColumnRefOperator> fieldMappings = new ArrayList<>();
        Map<ColumnRefOperator, ScalarOperator> projections = Maps.newHashMap();
        // Re-project every existing field under a fresh column ref.
        for (ColumnRefOperator expression : subOpt.getFieldMappings()) {
            ColumnRefOperator variable = columnRefFactory.create(expression, expression.getType(), expression.isNullable());
            projections.put(variable, expression);
            fieldMappings.add(variable);
        }
        outputTranslations.setFieldMappings(fieldMappings);
        // Carry over all previously mapped expressions, then the window's own expressions.
        for (Expr expression : subOpt.getExpressionMapping().getAllExpressions()) {
            ColumnRefOperator columnRef = findOrCreateColumnRefForExpr(expression,
                    subOpt.getExpressionMapping(), projections, columnRefFactory);
            outputTranslations.put(expression, columnRef);
        }
        for (Expr expression : projectExpressions) {
            ColumnRefOperator columnRef = findOrCreateColumnRefForExpr(expression,
                    subOpt.getExpressionMapping(), projections, columnRefFactory);
            outputTranslations.put(expression, columnRef);
        }
        LogicalProjectOperator projectOperator = new LogicalProjectOperator(projections);
        subOpt.setExpressionMapping(outputTranslations);
        subOpt = subOpt.withNewRoot(projectOperator);
    }

    // Standardize each analytic call and merge calls that share the same (standardized)
    // window definition into a single WindowOperator; a skewed duplicate marks the group skewed.
    List<WindowTransformer.WindowOperator> windowOperators = new ArrayList<>();
    for (AnalyticExpr analyticExpr : window) {
        WindowTransformer.WindowOperator rewriteOperator = WindowTransformer.standardize(analyticExpr);
        if (windowOperators.contains(rewriteOperator)) {
            WindowTransformer.WindowOperator windowOperator =
                    windowOperators.get(windowOperators.indexOf(rewriteOperator));
            if (rewriteOperator.isSkewed()) {
                windowOperator.setSkewed();
            }
            windowOperator.addFunction(analyticExpr);
        } else {
            windowOperators.add(rewriteOperator);
        }
    }

    // Reorder the grouped operators and stack them on the plan, one root per window operator.
    List<LogicalWindowOperator> logicalWindowOperators =
            WindowTransformer.reorderWindowOperator(windowOperators, columnRefFactory, subOpt);
    for (LogicalWindowOperator logicalWindowOperator : logicalWindowOperators) {
        subOpt = subOpt.withNewRoot(logicalWindowOperator);
    }

    return subOpt;
}
}
/**
 * Appends window (analytic) operators to the plan under construction.
 * <p>
 * First ensures every partition/order-by expression of each analytic call is available as a
 * column reference — adding a LogicalProjectOperator when any of them is a computed
 * expression — then standardizes and groups the analytic calls into LogicalWindowOperators
 * and stacks those on top of the plan. Returns {@code subOpt} unchanged when {@code window}
 * is empty.
 */
private OptExprBuilder window(OptExprBuilder subOpt, List<AnalyticExpr> window) {
    if (window.isEmpty()) {
        return subOpt;
    }

    // Collect every partition expression and order-by expression used by any window function.
    List<Expr> projectExpressions = new ArrayList<>();
    for (AnalyticExpr analyticExpr : window) {
        projectExpressions.addAll(analyticExpr.getPartitionExprs());
        projectExpressions.addAll(analyticExpr.getOrderByElements()
                .stream().map(OrderByElement::getExpr).collect(Collectors.toList()));
    }

    // Probe whether all of those expressions already translate to plain column references.
    // tempMapping buffers the results so the shared mapping is only mutated on full success.
    final ExpressionMapping expressionMapping = subOpt.getExpressionMapping();
    boolean allColumnRef = true;
    Map<Expr, ColumnRefOperator> tempMapping = new HashMap<>();
    for (Expr expression : projectExpressions) {
        ScalarOperator operator = SqlToScalarOperatorTranslator.translate(expression, expressionMapping, columnRefFactory);
        if (!operator.isColumnRef()) {
            allColumnRef = false;
            tempMapping.clear();
            break;
        } else {
            tempMapping.put(expression, (ColumnRefOperator) operator);
        }
    }
    if (allColumnRef) {
        expressionMapping.getExpressionToColumns().putAll(tempMapping);
    }

    // When some partition/order-by expression is computed (not a bare column), insert a
    // LogicalProjectOperator that materializes it; otherwise no projection is needed.
    if (!allColumnRef) {
        ExpressionMapping outputTranslations = new ExpressionMapping(subOpt.getScope());
        List<ColumnRefOperator> fieldMappings = new ArrayList<>();
        Map<ColumnRefOperator, ScalarOperator> projections = Maps.newHashMap();
        // Re-project every existing field under a fresh column ref.
        for (ColumnRefOperator expression : subOpt.getFieldMappings()) {
            ColumnRefOperator variable = columnRefFactory.create(expression, expression.getType(), expression.isNullable());
            projections.put(variable, expression);
            fieldMappings.add(variable);
        }
        outputTranslations.setFieldMappings(fieldMappings);
        // Carry over all previously mapped expressions, then the window's own expressions.
        for (Expr expression : subOpt.getExpressionMapping().getAllExpressions()) {
            ColumnRefOperator columnRef = findOrCreateColumnRefForExpr(expression,
                    subOpt.getExpressionMapping(), projections, columnRefFactory);
            outputTranslations.put(expression, columnRef);
        }
        for (Expr expression : projectExpressions) {
            ColumnRefOperator columnRef = findOrCreateColumnRefForExpr(expression,
                    subOpt.getExpressionMapping(), projections, columnRefFactory);
            outputTranslations.put(expression, columnRef);
        }
        LogicalProjectOperator projectOperator = new LogicalProjectOperator(projections);
        subOpt.setExpressionMapping(outputTranslations);
        subOpt = subOpt.withNewRoot(projectOperator);
    }

    // Standardize each analytic call and merge calls that share the same (standardized)
    // window definition into a single WindowOperator; a skewed duplicate marks the group skewed.
    List<WindowTransformer.WindowOperator> windowOperators = new ArrayList<>();
    for (AnalyticExpr analyticExpr : window) {
        WindowTransformer.WindowOperator rewriteOperator = WindowTransformer.standardize(analyticExpr);
        if (windowOperators.contains(rewriteOperator)) {
            WindowTransformer.WindowOperator windowOperator =
                    windowOperators.get(windowOperators.indexOf(rewriteOperator));
            if (rewriteOperator.isSkewed()) {
                windowOperator.setSkewed();
            }
            windowOperator.addFunction(analyticExpr);
        } else {
            windowOperators.add(rewriteOperator);
        }
    }

    // Reorder the grouped operators and stack them on the plan, one root per window operator.
    List<LogicalWindowOperator> logicalWindowOperators =
            WindowTransformer.reorderWindowOperator(windowOperators, columnRefFactory, subOpt);
    for (LogicalWindowOperator logicalWindowOperator : logicalWindowOperators) {
        subOpt = subOpt.withNewRoot(logicalWindowOperator);
    }

    return subOpt;
}
/**
 * Transforms an analyzed {@code SelectRelation} AST into a {@code LogicalPlan} of
 * logical operators. {@link #plan} drives the pipeline in SQL evaluation order:
 * FROM -> WHERE -> GROUP BY/aggregate -> HAVING -> window -> ORDER BY projection ->
 * DISTINCT -> project -> sort -> limit.
 */
class QueryTransformer {
    private final ColumnRefFactory columnRefFactory;
    private final ConnectContext session;
    // Correlated column refs discovered while translating predicates (see filter()).
    private final List<ColumnRefOperator> correlation = new ArrayList<>();
    private final CTETransformerContext cteContext;
    private final boolean inlineView;
    // Mapping from operators to their originating AST nodes; forwarded to planFrom().
    private final Map<Operator, ParseNode> optToAstMap;

    // Synthetic column names for grouping-set bookkeeping columns.
    public static final String GROUPING_ID = "GROUPING_ID";
    public static final String GROUPING = "GROUPING";

    /** Wires the collaborators used throughout planning; performs no work itself. */
    public QueryTransformer(ColumnRefFactory columnRefFactory, ConnectContext session,
                            CTETransformerContext cteContext, boolean inlineView,
                            Map<Operator, ParseNode> optToAstMap) {
        this.columnRefFactory = columnRefFactory;
        this.session = session;
        this.cteContext = cteContext;
        this.inlineView = inlineView;
        this.optToAstMap = optToAstMap;
    }

    /**
     * Builds the full logical plan for one query block.
     *
     * @param queryBlock the analyzed SELECT
     * @param outer      expression mapping of the enclosing scope (for correlated references)
     * @return the logical plan plus its output columns and collected correlated refs
     */
    public LogicalPlan plan(SelectRelation queryBlock, ExpressionMapping outer) {
        OptExprBuilder builder = planFrom(queryBlock.getRelation(), cteContext);
        builder.setExpressionMapping(new ExpressionMapping(builder.getScope(), builder.getFieldMappings(), outer));

        // Register generated expressions so later references resolve to their columns.
        Map<Expr, SlotRef> generatedExprToColumnRef = queryBlock.getGeneratedExprToColumnRef();
        ExpressionMapping expressionMapping = builder.getExpressionMapping();
        for (Map.Entry<Expr, SlotRef> m : generatedExprToColumnRef.entrySet()) {
            ScalarOperator scalarOperator = SqlToScalarOperatorTranslator.translate(m.getValue(),
                    builder.getExpressionMapping(), columnRefFactory);
            expressionMapping.put(m.getKey(), (ColumnRefOperator) scalarOperator);
        }

        builder = filter(builder, queryBlock.getPredicate());
        builder = aggregate(builder, queryBlock.getGroupBy(), queryBlock.getAggregate(),
                queryBlock.getGroupingSetsList(), queryBlock.getGroupingFunctionCallExprs());
        builder = filter(builder, queryBlock.getHaving());

        List<AnalyticExpr> analyticExprList = new ArrayList<>(queryBlock.getOutputAnalytic());
        analyticExprList.addAll(queryBlock.getOrderByAnalytic());
        builder = window(builder, analyticExprList);

        if (queryBlock.hasOrderByClause()) {
            if (!queryBlock.getGroupBy().isEmpty() || !queryBlock.getAggregate().isEmpty()) {
                // ORDER BY may reference order-source expressions; expose them by name too.
                List<String> outputNames = new ArrayList<>(queryBlock.getColumnOutputNames());
                for (int i = 0; i < queryBlock.getOrderSourceExpressions().size(); ++i) {
                    outputNames.add(queryBlock.getOrderSourceExpressions().get(i).toString());
                }
                builder = projectForOrder(builder,
                        Iterables.concat(queryBlock.getOutputExpression(),
                                queryBlock.getOrderSourceExpressions(),
                                queryBlock.getOrderByAnalytic()),
                        queryBlock.getOutputExprInOrderByScope(),
                        outputNames,
                        builder.getFieldMappings(),
                        queryBlock.getOrderScope(), true);
            } else {
                builder = projectForOrder(builder,
                        Iterables.concat(queryBlock.getOutputExpression(),
                                queryBlock.getOrderByAnalytic()),
                        queryBlock.getOutputExprInOrderByScope(),
                        queryBlock.getColumnOutputNames(),
                        builder.getFieldMappings(),
                        queryBlock.getOrderScope(), queryBlock.isDistinct());
            }
        }

        builder = distinct(builder, queryBlock.isDistinct(), queryBlock.getOutputExpression());
        builder = project(builder, Iterables.concat(queryBlock.getOrderByExpressions(),
                queryBlock.getOutputExpression()));

        List<ColumnRefOperator> orderByColumns = Lists.newArrayList();
        builder = sort(builder, queryBlock.getOrderBy(), orderByColumns);
        builder = limit(builder, queryBlock.getLimit());

        List<ColumnRefOperator> outputColumns = computeOutputs(builder, queryBlock.getOutputExpression(), columnRefFactory);
        // Sort columns that are not part of the output must be projected away afterwards.
        if (!orderByColumns.isEmpty() && !outputColumns.containsAll(orderByColumns)) {
            long limit = queryBlock.hasLimit() ? queryBlock.getLimit().getLimit() : -1;
            builder = project(builder, queryBlock.getOutputExpression(), limit);
        }
        return new LogicalPlan(builder, outputColumns, correlation);
    }

    /** Translates the output expressions to the column refs they resolve to in the final plan. */
    private static List<ColumnRefOperator> computeOutputs(OptExprBuilder builder,
                                                          List<Expr> outputExpressions,
                                                          ColumnRefFactory columnRefFactory) {
        List<ColumnRefOperator> outputs = new ArrayList<>();
        for (Expr expression : outputExpressions) {
            outputs.add((ColumnRefOperator) SqlToScalarOperatorTranslator
                    .translate(expression, builder.getExpressionMapping(), columnRefFactory));
        }
        return outputs;
    }

    /** Plans the FROM clause by delegating to {@code RelationTransformer} in a fresh scope. */
    private OptExprBuilder planFrom(Relation node, CTETransformerContext cteContext) {
        TransformerContext transformerContext = new TransformerContext(
                columnRefFactory, session,
                new ExpressionMapping(new Scope(RelationId.anonymous(), new RelationFields())),
                cteContext, inlineView, optToAstMap);
        return new RelationTransformer(transformerContext).visit(node).getRootBuilder();
    }

    /**
     * Builds the projection used when ORDER BY has its own scope, binding each output
     * expression either to its output name (when visible in the order-by scope) or to itself.
     */
    private OptExprBuilder projectForOrder(OptExprBuilder subOpt, Iterable<Expr> outputExpression,
                                           List<Integer> outputExprInOrderByScope,
                                           List<String> outputNames,
                                           List<ColumnRefOperator> sourceExpression,
                                           Scope scope, boolean withAggregation) {
        ExpressionMapping outputTranslations = new ExpressionMapping(scope);
        Map<ColumnRefOperator, ScalarOperator> projections = Maps.newHashMap();
        int outputExprIdx = 0;
        for (Expr expression : outputExpression) {
            Map<ScalarOperator, SubqueryOperator> subqueryPlaceholders = Maps.newHashMap();
            ScalarOperator scalarOperator = SqlToScalarOperatorTranslator.translate(expression,
                    subOpt.getExpressionMapping(), columnRefFactory,
                    session, cteContext, subOpt, subqueryPlaceholders, false);
            // Subqueries inside the expression are planned and spliced into the builder here.
            Pair<ScalarOperator, OptExprBuilder> pair =
                    SubqueryUtils.rewriteScalarOperator(scalarOperator, subOpt, subqueryPlaceholders);
            scalarOperator = pair.first;
            subOpt = pair.second;
            ColumnRefOperator columnRefOperator =
                    getOrCreateColumnRefOperator(expression, scalarOperator, projections);
            projections.put(columnRefOperator, scalarOperator);
            if (outputExprInOrderByScope.contains(outputExprIdx)) {
                outputTranslations.putWithSymbol(expression,
                        new SlotRef(null, outputNames.get(outputExprIdx)), columnRefOperator);
            } else {
                outputTranslations.putWithSymbol(expression, expression, columnRefOperator);
            }
            outputExprIdx++;
        }
        if (!withAggregation) {
            // Without aggregation the source columns stay visible to ORDER BY; pass them through.
            List<ColumnRefOperator> fieldMappings = new ArrayList<>(outputTranslations.getFieldMappings());
            for (int i = 0; i < sourceExpression.size(); ++i) {
                ColumnRefOperator columnRefOperator = sourceExpression.get(i);
                projections.put(columnRefOperator, columnRefOperator);
                fieldMappings.set(scope.getRelationFields().size() + i, columnRefOperator);
            }
            outputTranslations.setFieldMappings(fieldMappings);
        }
        outputTranslations.addExpressionToColumns(subOpt.getExpressionMapping().getExpressionToColumns());
        LogicalProjectOperator projectOperator = new LogicalProjectOperator(projections);
        return new OptExprBuilder(projectOperator, Lists.newArrayList(subOpt), outputTranslations);
    }

    /** Convenience overload: project with no limit (-1). */
    private OptExprBuilder project(OptExprBuilder subOpt, Iterable<Expr> expressions) {
        return project(subOpt, expressions, -1);
    }

    /**
     * Translates {@code expressions} (planning any embedded subqueries) and places a
     * {@code LogicalProjectOperator} with the given limit on top of the builder.
     */
    private OptExprBuilder project(OptExprBuilder subOpt, Iterable<Expr> expressions, long limit) {
        ExpressionMapping outputTranslations =
                new ExpressionMapping(subOpt.getScope(), subOpt.getFieldMappings());
        Map<ColumnRefOperator, ScalarOperator> projections = Maps.newHashMap();
        for (Expr expression : expressions) {
            Map<ScalarOperator, SubqueryOperator> subqueryPlaceholders = Maps.newHashMap();
            ScalarOperator scalarOperator = SqlToScalarOperatorTranslator.translate(expression,
                    subOpt.getExpressionMapping(), columnRefFactory,
                    session, cteContext, subOpt, subqueryPlaceholders, false);
            Pair<ScalarOperator, OptExprBuilder> pair =
                    SubqueryUtils.rewriteScalarOperator(scalarOperator, subOpt, subqueryPlaceholders);
            scalarOperator = pair.first;
            subOpt = pair.second;
            ColumnRefOperator columnRefOperator =
                    getOrCreateColumnRefOperator(expression, scalarOperator, projections);
            projections.put(columnRefOperator, scalarOperator);
            outputTranslations.put(expression, columnRefOperator);
        }
        outputTranslations.addExpressionToColumns(subOpt.getExpressionMapping().getExpressionToColumns());
        LogicalProjectOperator projectOperator = new LogicalProjectOperator(projections, limit);
        return new OptExprBuilder(projectOperator, Lists.newArrayList(subOpt), outputTranslations);
    }

    /**
     * Adds a {@code LogicalFilterOperator} for {@code predicate}; collects correlated
     * column refs into {@link #correlation}. No-op when the predicate is absent or
     * rewrites to null.
     */
    private OptExprBuilder filter(OptExprBuilder subOpt, Expr predicate) {
        if (predicate == null) {
            return subOpt;
        }
        Map<ScalarOperator, SubqueryOperator> subqueryPlaceholders = Maps.newHashMap();
        ScalarOperator scalarPredicate = SqlToScalarOperatorTranslator.translate(predicate,
                subOpt.getExpressionMapping(), correlation, columnRefFactory,
                session, cteContext, subOpt, subqueryPlaceholders, true);
        Pair<ScalarOperator, OptExprBuilder> pair =
                SubqueryUtils.rewriteScalarOperator(scalarPredicate, subOpt, subqueryPlaceholders);
        scalarPredicate = pair.first;
        subOpt = pair.second;
        if (scalarPredicate == null) {
            return subOpt;
        }
        LogicalFilterOperator filterOperator = new LogicalFilterOperator(scalarPredicate);
        return subOpt.withNewRoot(filterOperator);
    }

    /** Adds a {@code LogicalLimitOperator} for LIMIT/OFFSET; no-op when absent. */
    private OptExprBuilder limit(OptExprBuilder subOpt, LimitElement limit) {
        if (limit == null) {
            return subOpt;
        }
        LogicalLimitOperator limitOperator = LogicalLimitOperator.init(limit.getLimit(), limit.getOffset());
        return subOpt.withNewRoot(limitOperator);
    }

    /**
     * Plans GROUP BY + aggregate functions, including grouping sets (ROLLUP/CUBE/GROUPING SETS)
     * via a {@code LogicalRepeatOperator}, and caps it with a global {@code LogicalAggregationOperator}.
     */
    public OptExprBuilder aggregate(OptExprBuilder subOpt,
                                    List<Expr> groupByExpressions, List<FunctionCallExpr> aggregates,
                                    List<List<Expr>> groupingSetsList, List<Expr> groupingFunctionCallExprs) {
        if (aggregates.size() == 0 && groupByExpressions.size() == 0) {
            return subOpt;
        }

        List<FunctionCallExpr> copyAggregates;
        if (groupingSetsList != null) {
            // Work on clones so group-by subexpressions inside aggregates can be wrapped
            // in CloneExpr without mutating the analyzed AST.
            copyAggregates = aggregates.stream().map(e -> (FunctionCallExpr) e.clone())
                    .collect(Collectors.toList());
            for (Expr groupBy : groupByExpressions) {
                copyAggregates.replaceAll(
                        root -> (FunctionCallExpr) replaceExprBottomUp(root, groupBy, new CloneExpr(groupBy)));
            }
        } else {
            copyAggregates = aggregates;
        }

        // Pre-project all non-constant aggregate arguments and all group-by expressions.
        ImmutableList.Builder<Expr> arguments = ImmutableList.builder();
        copyAggregates.stream().filter(f -> !f.getParams().isStar())
                .map(TreeNode::getChildren).flatMap(List::stream)
                .filter(e -> !(e.isConstant())).forEach(arguments::add);
        Iterable<Expr> inputs = Iterables.concat(groupByExpressions, arguments.build());
        if (!Iterables.isEmpty(inputs)) {
            subOpt = project(subOpt, inputs);
        }

        ExpressionMapping groupingTranslations =
                new ExpressionMapping(subOpt.getScope(), subOpt.getFieldMappings());

        List<ColumnRefOperator> groupByColumnRefs = new ArrayList<>();
        boolean groupAllConst = groupByExpressions.stream().allMatch(Expr::isConstant);
        for (Expr groupingItem : groupByExpressions) {
            // Constant group-by keys are dropped, except the first one when every key is
            // constant, and except under grouping sets where positions must be preserved.
            if (groupingItem.isConstant() && !(groupAllConst && groupByColumnRefs.isEmpty()) &&
                    groupingSetsList == null) {
                continue;
            }
            ScalarOperator groupingKey = SqlToScalarOperatorTranslator.translate(groupingItem,
                    subOpt.getExpressionMapping(), columnRefFactory);
            ColumnRefOperator colRef = (ColumnRefOperator) groupingKey;
            if (!groupByColumnRefs.contains(colRef)) {
                groupByColumnRefs.add(colRef);
            }
            groupingTranslations.put(groupingItem, colRef);
        }

        Map<ColumnRefOperator, CallOperator> aggregationsMap = Maps.newHashMap();
        for (int i = 0; i < aggregates.size(); i++) {
            FunctionCallExpr copyAggregate = copyAggregates.get(i);
            ScalarOperator aggCallOperator = SqlToScalarOperatorTranslator.translate(copyAggregate,
                    subOpt.getExpressionMapping(), columnRefFactory);
            CallOperator aggOperator = (CallOperator) aggCallOperator;
            ColumnRefOperator colRef = columnRefFactory.create(aggOperator.getFnName(),
                    copyAggregate.getType(), copyAggregate.isNullable());
            aggregationsMap.put(colRef, aggOperator);
            // Map the original (uncloned) aggregate expression to the new column.
            groupingTranslations.put(aggregates.get(i), colRef);
        }

        if (groupingSetsList != null) {
            /*
             * repeatOutput is used to record the output column of repeatOperator,
             * this output column only represents the generated grouping_id column
             */
            List<ColumnRefOperator> repeatOutput = new ArrayList<>();

            /*
             * groupingIdsBitSets is used to record the complete grouping_id,
             * which contains all the group by columns.
             * groupingIds is converted by groupingIdsBitSets
             */
            ArrayList<BitSet> groupingIdsBitSets = new ArrayList<>();
            List<List<Long>> groupingIds = new ArrayList<>();

            /*
             * repeatColumnRefList is used to record the column reference
             * that needs to be repeatedly calculated.
             * This column reference is come from the child of repeat operator
             */
            List<List<ColumnRefOperator>> repeatColumnRefList = new ArrayList<>();

            for (List<Expr> grouping : groupingSetsList) {
                List<ColumnRefOperator> repeatColumnRef = new ArrayList<>();
                BitSet groupingIdBitSet = new BitSet(groupByColumnRefs.size());
                groupingIdBitSet.set(0, groupByExpressions.size(), true);
                for (Expr groupingField : grouping) {
                    ColumnRefOperator groupingKey = (ColumnRefOperator) SqlToScalarOperatorTranslator.translate(
                            groupingField, subOpt.getExpressionMapping(), columnRefFactory);
                    repeatColumnRef.add(groupingKey);
                    if (groupByColumnRefs.contains(groupingKey)) {
                        // Bit cleared => the column participates in this grouping set.
                        groupingIdBitSet.set(groupByColumnRefs.indexOf(groupingKey), false);
                    }
                }
                groupingIdsBitSets.add(groupingIdBitSet);
                repeatColumnRefList.add(repeatColumnRef);
            }

            ColumnRefOperator grouping = columnRefFactory.create(GROUPING_ID, Type.BIGINT, false);
            List<Long> groupingID = new ArrayList<>();
            for (BitSet bitSet : groupingIdsBitSets) {
                long gid = Utils.convertBitSetToLong(bitSet, groupByColumnRefs.size());
                // Duplicate grouping sets would collide on the same gid; offset until unique.
                while (groupingID.contains(gid)) {
                    gid += Math.pow(2, groupByColumnRefs.size());
                }
                groupingID.add(gid);
            }
            groupingIds.add(groupingID);
            groupByColumnRefs.add(grouping);
            repeatOutput.add(grouping);

            // One extra output column per GROUPING()/GROUPING_ID() call in the query.
            for (Expr groupingFunction : groupingFunctionCallExprs) {
                grouping = columnRefFactory.create(GROUPING, Type.BIGINT, false);
                ArrayList<BitSet> tempGroupingIdsBitSets = new ArrayList<>();
                for (int i = 0; i < repeatColumnRefList.size(); ++i) {
                    tempGroupingIdsBitSets.add(new BitSet(groupingFunction.getChildren().size()));
                }
                for (int childIdx = 0; childIdx < groupingFunction.getChildren().size(); ++childIdx) {
                    SlotRef slotRef = (SlotRef) groupingFunction.getChild(childIdx);
                    ColumnRefOperator groupingKey = (ColumnRefOperator) SqlToScalarOperatorTranslator
                            .translate(slotRef, subOpt.getExpressionMapping(), columnRefFactory);
                    for (List<ColumnRefOperator> repeatColumns : repeatColumnRefList) {
                        if (repeatColumns.contains(groupingKey)) {
                            for (int repeatColIdx = 0; repeatColIdx < repeatColumnRefList.size(); ++repeatColIdx) {
                                tempGroupingIdsBitSets.get(repeatColIdx).set(childIdx,
                                        groupingIdsBitSets.get(repeatColIdx)
                                                .get(groupByColumnRefs.indexOf(groupingKey)));
                            }
                        }
                    }
                }
                groupingTranslations.put(groupingFunction, grouping);
                groupingIds.add(tempGroupingIdsBitSets.stream().map(bitset ->
                                Utils.convertBitSetToLong(bitset, groupingFunction.getChildren().size()))
                        .collect(Collectors.toList()));
                groupByColumnRefs.add(grouping);
                repeatOutput.add(grouping);
            }

            LogicalRepeatOperator repeatOperator =
                    new LogicalRepeatOperator(repeatOutput, repeatColumnRefList, groupingIds);
            subOpt = new OptExprBuilder(repeatOperator, Lists.newArrayList(subOpt), groupingTranslations);
        }

        return new OptExprBuilder(
                new LogicalAggregationOperator(AggType.GLOBAL, groupByColumnRefs, aggregationsMap),
                Lists.newArrayList(subOpt), groupingTranslations);
    }

    /** Post-order in-place replacement of every subtree equal to {@code pattern} with {@code replace}. */
    private Expr replaceExprBottomUp(Expr root, Expr pattern, Expr replace) {
        if (root.getChildren().size() > 0) {
            for (int i = 0; i < root.getChildren().size(); i++) {
                Expr result = replaceExprBottomUp(root.getChild(i), pattern, replace);
                root.setChild(i, result);
            }
        }
        if (root.equals(pattern)) {
            return replace;
        }
        return root;
    }

    /**
     * Adds a {@code LogicalTopNOperator} for ORDER BY; literal order keys are skipped,
     * and the resolved order columns are appended to {@code orderByColumns} for the caller.
     */
    private OptExprBuilder sort(OptExprBuilder subOpt, List<OrderByElement> orderByExpressions,
                                List<ColumnRefOperator> orderByColumns) {
        if (orderByExpressions.isEmpty()) {
            return subOpt;
        }
        List<Ordering> orderings = new ArrayList<>();
        for (OrderByElement item : orderByExpressions) {
            if (item.getExpr().isLiteral()) {
                continue;
            }
            ColumnRefOperator column = (ColumnRefOperator) SqlToScalarOperatorTranslator.translate(item.getExpr(),
                    subOpt.getExpressionMapping(), columnRefFactory);
            Ordering ordering = new Ordering(column, item.getIsAsc(),
                    OrderByElement.nullsFirst(item.getNullsFirstParam()));
            if (!orderByColumns.contains(column)) {
                orderings.add(ordering);
                orderByColumns.add(column);
            }
        }
        if (orderByColumns.isEmpty()) {
            return subOpt;
        }
        LogicalTopNOperator sortOperator = new LogicalTopNOperator(orderings);
        return subOpt.withNewRoot(sortOperator);
    }

    /** Implements SELECT DISTINCT as a global aggregation grouping on all output columns. */
    private OptExprBuilder distinct(OptExprBuilder subOpt, boolean isDistinct,
                                    List<Expr> outputExpressions) {
        if (isDistinct) {
            subOpt = project(subOpt, outputExpressions);
            List<ColumnRefOperator> groupByColumns = Lists.newArrayList();
            for (Expr expr : outputExpressions) {
                ColumnRefOperator column = (ColumnRefOperator) SqlToScalarOperatorTranslator
                        .translate(expr, subOpt.getExpressionMapping(), columnRefFactory);
                if (!groupByColumns.contains(column)) {
                    groupByColumns.add(column);
                }
            }
            return subOpt.withNewRoot(
                    new LogicalAggregationOperator(AggType.GLOBAL, groupByColumns, new HashMap<>()));
        } else {
            return subOpt;
        }
    }

    /**
     * Returns the column ref for a translated scalar: the scalar itself when it already is
     * one, the existing projection key when the same value was projected before, otherwise
     * a freshly minted column ref.
     */
    private ColumnRefOperator getOrCreateColumnRefOperator(Expr expression, ScalarOperator scalarOperator,
                                                           Map<ColumnRefOperator, ScalarOperator> projections) {
        ColumnRefOperator columnRefOperator;
        if (scalarOperator.isColumnRef()) {
            columnRefOperator = (ColumnRefOperator) scalarOperator;
        } else if (scalarOperator.isVariable() && projections.containsValue(scalarOperator)) {
            columnRefOperator = projections.entrySet().stream()
                    .filter(e -> scalarOperator.equals(e.getValue()))
                    .findAny()
                    .map(Map.Entry::getKey)
                    .orElse(null);
            Preconditions.checkNotNull(columnRefOperator);
        } else {
            columnRefOperator = columnRefFactory.create(expression, expression.getType(),
                    scalarOperator.isNullable());
        }
        return columnRefOperator;
    }
}
/**
 * Transforms an analyzed {@code SelectRelation} AST into a {@code LogicalPlan} of
 * logical operators. {@link #plan} drives the pipeline in SQL evaluation order:
 * FROM -> WHERE -> GROUP BY/aggregate -> HAVING -> window -> ORDER BY projection ->
 * DISTINCT -> project -> sort -> limit.
 */
class QueryTransformer {
    private final ColumnRefFactory columnRefFactory;
    private final ConnectContext session;
    // Correlated column refs discovered while translating predicates (see filter()).
    private final List<ColumnRefOperator> correlation = new ArrayList<>();
    private final CTETransformerContext cteContext;
    private final boolean inlineView;
    // Mapping from operators to their originating AST nodes; forwarded to planFrom().
    private final Map<Operator, ParseNode> optToAstMap;

    // Synthetic column names for grouping-set bookkeeping columns.
    public static final String GROUPING_ID = "GROUPING_ID";
    public static final String GROUPING = "GROUPING";

    /** Wires the collaborators used throughout planning; performs no work itself. */
    public QueryTransformer(ColumnRefFactory columnRefFactory, ConnectContext session,
                            CTETransformerContext cteContext, boolean inlineView,
                            Map<Operator, ParseNode> optToAstMap) {
        this.columnRefFactory = columnRefFactory;
        this.session = session;
        this.cteContext = cteContext;
        this.inlineView = inlineView;
        this.optToAstMap = optToAstMap;
    }

    /**
     * Builds the full logical plan for one query block.
     *
     * @param queryBlock the analyzed SELECT
     * @param outer      expression mapping of the enclosing scope (for correlated references)
     * @return the logical plan plus its output columns and collected correlated refs
     */
    public LogicalPlan plan(SelectRelation queryBlock, ExpressionMapping outer) {
        OptExprBuilder builder = planFrom(queryBlock.getRelation(), cteContext);
        builder.setExpressionMapping(new ExpressionMapping(builder.getScope(), builder.getFieldMappings(), outer));

        // Register generated expressions so later references resolve to their columns.
        Map<Expr, SlotRef> generatedExprToColumnRef = queryBlock.getGeneratedExprToColumnRef();
        ExpressionMapping expressionMapping = builder.getExpressionMapping();
        for (Map.Entry<Expr, SlotRef> m : generatedExprToColumnRef.entrySet()) {
            ScalarOperator scalarOperator = SqlToScalarOperatorTranslator.translate(m.getValue(),
                    builder.getExpressionMapping(), columnRefFactory);
            expressionMapping.put(m.getKey(), (ColumnRefOperator) scalarOperator);
        }

        builder = filter(builder, queryBlock.getPredicate());
        builder = aggregate(builder, queryBlock.getGroupBy(), queryBlock.getAggregate(),
                queryBlock.getGroupingSetsList(), queryBlock.getGroupingFunctionCallExprs());
        builder = filter(builder, queryBlock.getHaving());

        List<AnalyticExpr> analyticExprList = new ArrayList<>(queryBlock.getOutputAnalytic());
        analyticExprList.addAll(queryBlock.getOrderByAnalytic());
        builder = window(builder, analyticExprList);

        if (queryBlock.hasOrderByClause()) {
            if (!queryBlock.getGroupBy().isEmpty() || !queryBlock.getAggregate().isEmpty()) {
                // ORDER BY may reference order-source expressions; expose them by name too.
                List<String> outputNames = new ArrayList<>(queryBlock.getColumnOutputNames());
                for (int i = 0; i < queryBlock.getOrderSourceExpressions().size(); ++i) {
                    outputNames.add(queryBlock.getOrderSourceExpressions().get(i).toString());
                }
                builder = projectForOrder(builder,
                        Iterables.concat(queryBlock.getOutputExpression(),
                                queryBlock.getOrderSourceExpressions(),
                                queryBlock.getOrderByAnalytic()),
                        queryBlock.getOutputExprInOrderByScope(),
                        outputNames,
                        builder.getFieldMappings(),
                        queryBlock.getOrderScope(), true);
            } else {
                builder = projectForOrder(builder,
                        Iterables.concat(queryBlock.getOutputExpression(),
                                queryBlock.getOrderByAnalytic()),
                        queryBlock.getOutputExprInOrderByScope(),
                        queryBlock.getColumnOutputNames(),
                        builder.getFieldMappings(),
                        queryBlock.getOrderScope(), queryBlock.isDistinct());
            }
        }

        builder = distinct(builder, queryBlock.isDistinct(), queryBlock.getOutputExpression());
        builder = project(builder, Iterables.concat(queryBlock.getOrderByExpressions(),
                queryBlock.getOutputExpression()));

        List<ColumnRefOperator> orderByColumns = Lists.newArrayList();
        builder = sort(builder, queryBlock.getOrderBy(), orderByColumns);
        builder = limit(builder, queryBlock.getLimit());

        List<ColumnRefOperator> outputColumns = computeOutputs(builder, queryBlock.getOutputExpression(), columnRefFactory);
        // Sort columns that are not part of the output must be projected away afterwards.
        if (!orderByColumns.isEmpty() && !outputColumns.containsAll(orderByColumns)) {
            long limit = queryBlock.hasLimit() ? queryBlock.getLimit().getLimit() : -1;
            builder = project(builder, queryBlock.getOutputExpression(), limit);
        }
        return new LogicalPlan(builder, outputColumns, correlation);
    }

    /** Translates the output expressions to the column refs they resolve to in the final plan. */
    private static List<ColumnRefOperator> computeOutputs(OptExprBuilder builder,
                                                          List<Expr> outputExpressions,
                                                          ColumnRefFactory columnRefFactory) {
        List<ColumnRefOperator> outputs = new ArrayList<>();
        for (Expr expression : outputExpressions) {
            outputs.add((ColumnRefOperator) SqlToScalarOperatorTranslator
                    .translate(expression, builder.getExpressionMapping(), columnRefFactory));
        }
        return outputs;
    }

    /** Plans the FROM clause by delegating to {@code RelationTransformer} in a fresh scope. */
    private OptExprBuilder planFrom(Relation node, CTETransformerContext cteContext) {
        TransformerContext transformerContext = new TransformerContext(
                columnRefFactory, session,
                new ExpressionMapping(new Scope(RelationId.anonymous(), new RelationFields())),
                cteContext, inlineView, optToAstMap);
        return new RelationTransformer(transformerContext).visit(node).getRootBuilder();
    }

    /**
     * Builds the projection used when ORDER BY has its own scope, binding each output
     * expression either to its output name (when visible in the order-by scope) or to itself.
     */
    private OptExprBuilder projectForOrder(OptExprBuilder subOpt, Iterable<Expr> outputExpression,
                                           List<Integer> outputExprInOrderByScope,
                                           List<String> outputNames,
                                           List<ColumnRefOperator> sourceExpression,
                                           Scope scope, boolean withAggregation) {
        ExpressionMapping outputTranslations = new ExpressionMapping(scope);
        Map<ColumnRefOperator, ScalarOperator> projections = Maps.newHashMap();
        int outputExprIdx = 0;
        for (Expr expression : outputExpression) {
            Map<ScalarOperator, SubqueryOperator> subqueryPlaceholders = Maps.newHashMap();
            ScalarOperator scalarOperator = SqlToScalarOperatorTranslator.translate(expression,
                    subOpt.getExpressionMapping(), columnRefFactory,
                    session, cteContext, subOpt, subqueryPlaceholders, false);
            // Subqueries inside the expression are planned and spliced into the builder here.
            Pair<ScalarOperator, OptExprBuilder> pair =
                    SubqueryUtils.rewriteScalarOperator(scalarOperator, subOpt, subqueryPlaceholders);
            scalarOperator = pair.first;
            subOpt = pair.second;
            ColumnRefOperator columnRefOperator =
                    getOrCreateColumnRefOperator(expression, scalarOperator, projections);
            projections.put(columnRefOperator, scalarOperator);
            if (outputExprInOrderByScope.contains(outputExprIdx)) {
                outputTranslations.putWithSymbol(expression,
                        new SlotRef(null, outputNames.get(outputExprIdx)), columnRefOperator);
            } else {
                outputTranslations.putWithSymbol(expression, expression, columnRefOperator);
            }
            outputExprIdx++;
        }
        if (!withAggregation) {
            // Without aggregation the source columns stay visible to ORDER BY; pass them through.
            List<ColumnRefOperator> fieldMappings = new ArrayList<>(outputTranslations.getFieldMappings());
            for (int i = 0; i < sourceExpression.size(); ++i) {
                ColumnRefOperator columnRefOperator = sourceExpression.get(i);
                projections.put(columnRefOperator, columnRefOperator);
                fieldMappings.set(scope.getRelationFields().size() + i, columnRefOperator);
            }
            outputTranslations.setFieldMappings(fieldMappings);
        }
        outputTranslations.addExpressionToColumns(subOpt.getExpressionMapping().getExpressionToColumns());
        LogicalProjectOperator projectOperator = new LogicalProjectOperator(projections);
        return new OptExprBuilder(projectOperator, Lists.newArrayList(subOpt), outputTranslations);
    }

    /** Convenience overload: project with no limit (-1). */
    private OptExprBuilder project(OptExprBuilder subOpt, Iterable<Expr> expressions) {
        return project(subOpt, expressions, -1);
    }

    /**
     * Translates {@code expressions} (planning any embedded subqueries) and places a
     * {@code LogicalProjectOperator} with the given limit on top of the builder.
     */
    private OptExprBuilder project(OptExprBuilder subOpt, Iterable<Expr> expressions, long limit) {
        ExpressionMapping outputTranslations =
                new ExpressionMapping(subOpt.getScope(), subOpt.getFieldMappings());
        Map<ColumnRefOperator, ScalarOperator> projections = Maps.newHashMap();
        for (Expr expression : expressions) {
            Map<ScalarOperator, SubqueryOperator> subqueryPlaceholders = Maps.newHashMap();
            ScalarOperator scalarOperator = SqlToScalarOperatorTranslator.translate(expression,
                    subOpt.getExpressionMapping(), columnRefFactory,
                    session, cteContext, subOpt, subqueryPlaceholders, false);
            Pair<ScalarOperator, OptExprBuilder> pair =
                    SubqueryUtils.rewriteScalarOperator(scalarOperator, subOpt, subqueryPlaceholders);
            scalarOperator = pair.first;
            subOpt = pair.second;
            ColumnRefOperator columnRefOperator =
                    getOrCreateColumnRefOperator(expression, scalarOperator, projections);
            projections.put(columnRefOperator, scalarOperator);
            outputTranslations.put(expression, columnRefOperator);
        }
        outputTranslations.addExpressionToColumns(subOpt.getExpressionMapping().getExpressionToColumns());
        LogicalProjectOperator projectOperator = new LogicalProjectOperator(projections, limit);
        return new OptExprBuilder(projectOperator, Lists.newArrayList(subOpt), outputTranslations);
    }

    /**
     * Adds a {@code LogicalFilterOperator} for {@code predicate}; collects correlated
     * column refs into {@link #correlation}. No-op when the predicate is absent or
     * rewrites to null.
     */
    private OptExprBuilder filter(OptExprBuilder subOpt, Expr predicate) {
        if (predicate == null) {
            return subOpt;
        }
        Map<ScalarOperator, SubqueryOperator> subqueryPlaceholders = Maps.newHashMap();
        ScalarOperator scalarPredicate = SqlToScalarOperatorTranslator.translate(predicate,
                subOpt.getExpressionMapping(), correlation, columnRefFactory,
                session, cteContext, subOpt, subqueryPlaceholders, true);
        Pair<ScalarOperator, OptExprBuilder> pair =
                SubqueryUtils.rewriteScalarOperator(scalarPredicate, subOpt, subqueryPlaceholders);
        scalarPredicate = pair.first;
        subOpt = pair.second;
        if (scalarPredicate == null) {
            return subOpt;
        }
        LogicalFilterOperator filterOperator = new LogicalFilterOperator(scalarPredicate);
        return subOpt.withNewRoot(filterOperator);
    }

    /** Adds a {@code LogicalLimitOperator} for LIMIT/OFFSET; no-op when absent. */
    private OptExprBuilder limit(OptExprBuilder subOpt, LimitElement limit) {
        if (limit == null) {
            return subOpt;
        }
        LogicalLimitOperator limitOperator = LogicalLimitOperator.init(limit.getLimit(), limit.getOffset());
        return subOpt.withNewRoot(limitOperator);
    }

    /**
     * Plans GROUP BY + aggregate functions, including grouping sets (ROLLUP/CUBE/GROUPING SETS)
     * via a {@code LogicalRepeatOperator}, and caps it with a global {@code LogicalAggregationOperator}.
     */
    public OptExprBuilder aggregate(OptExprBuilder subOpt,
                                    List<Expr> groupByExpressions, List<FunctionCallExpr> aggregates,
                                    List<List<Expr>> groupingSetsList, List<Expr> groupingFunctionCallExprs) {
        if (aggregates.size() == 0 && groupByExpressions.size() == 0) {
            return subOpt;
        }

        List<FunctionCallExpr> copyAggregates;
        if (groupingSetsList != null) {
            // Work on clones so group-by subexpressions inside aggregates can be wrapped
            // in CloneExpr without mutating the analyzed AST.
            copyAggregates = aggregates.stream().map(e -> (FunctionCallExpr) e.clone())
                    .collect(Collectors.toList());
            for (Expr groupBy : groupByExpressions) {
                copyAggregates.replaceAll(
                        root -> (FunctionCallExpr) replaceExprBottomUp(root, groupBy, new CloneExpr(groupBy)));
            }
        } else {
            copyAggregates = aggregates;
        }

        // Pre-project all non-constant aggregate arguments and all group-by expressions.
        ImmutableList.Builder<Expr> arguments = ImmutableList.builder();
        copyAggregates.stream().filter(f -> !f.getParams().isStar())
                .map(TreeNode::getChildren).flatMap(List::stream)
                .filter(e -> !(e.isConstant())).forEach(arguments::add);
        Iterable<Expr> inputs = Iterables.concat(groupByExpressions, arguments.build());
        if (!Iterables.isEmpty(inputs)) {
            subOpt = project(subOpt, inputs);
        }

        ExpressionMapping groupingTranslations =
                new ExpressionMapping(subOpt.getScope(), subOpt.getFieldMappings());

        List<ColumnRefOperator> groupByColumnRefs = new ArrayList<>();
        boolean groupAllConst = groupByExpressions.stream().allMatch(Expr::isConstant);
        for (Expr groupingItem : groupByExpressions) {
            // Constant group-by keys are dropped, except the first one when every key is
            // constant, and except under grouping sets where positions must be preserved.
            if (groupingItem.isConstant() && !(groupAllConst && groupByColumnRefs.isEmpty()) &&
                    groupingSetsList == null) {
                continue;
            }
            ScalarOperator groupingKey = SqlToScalarOperatorTranslator.translate(groupingItem,
                    subOpt.getExpressionMapping(), columnRefFactory);
            ColumnRefOperator colRef = (ColumnRefOperator) groupingKey;
            if (!groupByColumnRefs.contains(colRef)) {
                groupByColumnRefs.add(colRef);
            }
            groupingTranslations.put(groupingItem, colRef);
        }

        Map<ColumnRefOperator, CallOperator> aggregationsMap = Maps.newHashMap();
        for (int i = 0; i < aggregates.size(); i++) {
            FunctionCallExpr copyAggregate = copyAggregates.get(i);
            ScalarOperator aggCallOperator = SqlToScalarOperatorTranslator.translate(copyAggregate,
                    subOpt.getExpressionMapping(), columnRefFactory);
            CallOperator aggOperator = (CallOperator) aggCallOperator;
            ColumnRefOperator colRef = columnRefFactory.create(aggOperator.getFnName(),
                    copyAggregate.getType(), copyAggregate.isNullable());
            aggregationsMap.put(colRef, aggOperator);
            // Map the original (uncloned) aggregate expression to the new column.
            groupingTranslations.put(aggregates.get(i), colRef);
        }

        if (groupingSetsList != null) {
            /*
             * repeatOutput is used to record the output column of repeatOperator,
             * this output column only represents the generated grouping_id column
             */
            List<ColumnRefOperator> repeatOutput = new ArrayList<>();

            /*
             * groupingIdsBitSets is used to record the complete grouping_id,
             * which contains all the group by columns.
             * groupingIds is converted by groupingIdsBitSets
             */
            ArrayList<BitSet> groupingIdsBitSets = new ArrayList<>();
            List<List<Long>> groupingIds = new ArrayList<>();

            /*
             * repeatColumnRefList is used to record the column reference
             * that needs to be repeatedly calculated.
             * This column reference is come from the child of repeat operator
             */
            List<List<ColumnRefOperator>> repeatColumnRefList = new ArrayList<>();

            for (List<Expr> grouping : groupingSetsList) {
                List<ColumnRefOperator> repeatColumnRef = new ArrayList<>();
                BitSet groupingIdBitSet = new BitSet(groupByColumnRefs.size());
                groupingIdBitSet.set(0, groupByExpressions.size(), true);
                for (Expr groupingField : grouping) {
                    ColumnRefOperator groupingKey = (ColumnRefOperator) SqlToScalarOperatorTranslator.translate(
                            groupingField, subOpt.getExpressionMapping(), columnRefFactory);
                    repeatColumnRef.add(groupingKey);
                    if (groupByColumnRefs.contains(groupingKey)) {
                        // Bit cleared => the column participates in this grouping set.
                        groupingIdBitSet.set(groupByColumnRefs.indexOf(groupingKey), false);
                    }
                }
                groupingIdsBitSets.add(groupingIdBitSet);
                repeatColumnRefList.add(repeatColumnRef);
            }

            ColumnRefOperator grouping = columnRefFactory.create(GROUPING_ID, Type.BIGINT, false);
            List<Long> groupingID = new ArrayList<>();
            for (BitSet bitSet : groupingIdsBitSets) {
                long gid = Utils.convertBitSetToLong(bitSet, groupByColumnRefs.size());
                // Duplicate grouping sets would collide on the same gid; offset until unique.
                while (groupingID.contains(gid)) {
                    gid += Math.pow(2, groupByColumnRefs.size());
                }
                groupingID.add(gid);
            }
            groupingIds.add(groupingID);
            groupByColumnRefs.add(grouping);
            repeatOutput.add(grouping);

            // One extra output column per GROUPING()/GROUPING_ID() call in the query.
            for (Expr groupingFunction : groupingFunctionCallExprs) {
                grouping = columnRefFactory.create(GROUPING, Type.BIGINT, false);
                ArrayList<BitSet> tempGroupingIdsBitSets = new ArrayList<>();
                for (int i = 0; i < repeatColumnRefList.size(); ++i) {
                    tempGroupingIdsBitSets.add(new BitSet(groupingFunction.getChildren().size()));
                }
                for (int childIdx = 0; childIdx < groupingFunction.getChildren().size(); ++childIdx) {
                    SlotRef slotRef = (SlotRef) groupingFunction.getChild(childIdx);
                    ColumnRefOperator groupingKey = (ColumnRefOperator) SqlToScalarOperatorTranslator
                            .translate(slotRef, subOpt.getExpressionMapping(), columnRefFactory);
                    for (List<ColumnRefOperator> repeatColumns : repeatColumnRefList) {
                        if (repeatColumns.contains(groupingKey)) {
                            for (int repeatColIdx = 0; repeatColIdx < repeatColumnRefList.size(); ++repeatColIdx) {
                                tempGroupingIdsBitSets.get(repeatColIdx).set(childIdx,
                                        groupingIdsBitSets.get(repeatColIdx)
                                                .get(groupByColumnRefs.indexOf(groupingKey)));
                            }
                        }
                    }
                }
                groupingTranslations.put(groupingFunction, grouping);
                groupingIds.add(tempGroupingIdsBitSets.stream().map(bitset ->
                                Utils.convertBitSetToLong(bitset, groupingFunction.getChildren().size()))
                        .collect(Collectors.toList()));
                groupByColumnRefs.add(grouping);
                repeatOutput.add(grouping);
            }

            LogicalRepeatOperator repeatOperator =
                    new LogicalRepeatOperator(repeatOutput, repeatColumnRefList, groupingIds);
            subOpt = new OptExprBuilder(repeatOperator, Lists.newArrayList(subOpt), groupingTranslations);
        }

        return new OptExprBuilder(
                new LogicalAggregationOperator(AggType.GLOBAL, groupByColumnRefs, aggregationsMap),
                Lists.newArrayList(subOpt), groupingTranslations);
    }

    /** Post-order in-place replacement of every subtree equal to {@code pattern} with {@code replace}. */
    private Expr replaceExprBottomUp(Expr root, Expr pattern, Expr replace) {
        if (root.getChildren().size() > 0) {
            for (int i = 0; i < root.getChildren().size(); i++) {
                Expr result = replaceExprBottomUp(root.getChild(i), pattern, replace);
                root.setChild(i, result);
            }
        }
        if (root.equals(pattern)) {
            return replace;
        }
        return root;
    }

    /**
     * Adds a {@code LogicalTopNOperator} for ORDER BY; literal order keys are skipped,
     * and the resolved order columns are appended to {@code orderByColumns} for the caller.
     */
    private OptExprBuilder sort(OptExprBuilder subOpt, List<OrderByElement> orderByExpressions,
                                List<ColumnRefOperator> orderByColumns) {
        if (orderByExpressions.isEmpty()) {
            return subOpt;
        }
        List<Ordering> orderings = new ArrayList<>();
        for (OrderByElement item : orderByExpressions) {
            if (item.getExpr().isLiteral()) {
                continue;
            }
            ColumnRefOperator column = (ColumnRefOperator) SqlToScalarOperatorTranslator.translate(item.getExpr(),
                    subOpt.getExpressionMapping(), columnRefFactory);
            Ordering ordering = new Ordering(column, item.getIsAsc(),
                    OrderByElement.nullsFirst(item.getNullsFirstParam()));
            if (!orderByColumns.contains(column)) {
                orderings.add(ordering);
                orderByColumns.add(column);
            }
        }
        if (orderByColumns.isEmpty()) {
            return subOpt;
        }
        LogicalTopNOperator sortOperator = new LogicalTopNOperator(orderings);
        return subOpt.withNewRoot(sortOperator);
    }

    /** Implements SELECT DISTINCT as a global aggregation grouping on all output columns. */
    private OptExprBuilder distinct(OptExprBuilder subOpt, boolean isDistinct,
                                    List<Expr> outputExpressions) {
        if (isDistinct) {
            subOpt = project(subOpt, outputExpressions);
            List<ColumnRefOperator> groupByColumns = Lists.newArrayList();
            for (Expr expr : outputExpressions) {
                ColumnRefOperator column = (ColumnRefOperator) SqlToScalarOperatorTranslator
                        .translate(expr, subOpt.getExpressionMapping(), columnRefFactory);
                if (!groupByColumns.contains(column)) {
                    groupByColumns.add(column);
                }
            }
            return subOpt.withNewRoot(
                    new LogicalAggregationOperator(AggType.GLOBAL, groupByColumns, new HashMap<>()));
        } else {
            return subOpt;
        }
    }

    /**
     * Returns the column ref for a translated scalar: the scalar itself when it already is
     * one, the existing projection key when the same value was projected before, otherwise
     * a freshly minted column ref.
     */
    private ColumnRefOperator getOrCreateColumnRefOperator(Expr expression, ScalarOperator scalarOperator,
                                                           Map<ColumnRefOperator, ScalarOperator> projections) {
        ColumnRefOperator columnRefOperator;
        if (scalarOperator.isColumnRef()) {
            columnRefOperator = (ColumnRefOperator) scalarOperator;
        } else if (scalarOperator.isVariable() && projections.containsValue(scalarOperator)) {
            columnRefOperator = projections.entrySet().stream()
                    .filter(e -> scalarOperator.equals(e.getValue()))
                    .findAny()
                    .map(Map.Entry::getKey)
                    .orElse(null);
            Preconditions.checkNotNull(columnRefOperator);
        } else {
            columnRefOperator = columnRefFactory.create(expression, expression.getType(),
                    scalarOperator.isNullable());
        }
        return columnRefOperator;
    }
}
Won't this throw an NPE when `action.clusterId()` is null?
private static void deferConfigChangesForClustersToBeRestarted(List<ConfigChangeAction> actions, VespaModel model) { Set<ClusterSpec.Id> clustersToBeRestarted = actions.stream() .filter(action -> action.getType() == ConfigChangeAction.Type.RESTART) .map(action -> action.clusterId()) .collect(Collectors.toSet()); for (var clusterToRestart : clustersToBeRestarted) { var containerCluster = model.getContainerClusters().get(clusterToRestart.value()); if (containerCluster != null) containerCluster.deferChangesUntilRestart(); var contentCluster = model.getContentClusters().get(clusterToRestart.value()); if (contentCluster != null) contentCluster.deferChangesUntilRestart(); } }
var containerCluster = model.getContainerClusters().get(clusterToRestart.value());
private static void deferConfigChangesForClustersToBeRestarted(List<ConfigChangeAction> actions, VespaModel model) { Set<ClusterSpec.Id> clustersToBeRestarted = actions.stream() .filter(action -> action.getType() == ConfigChangeAction.Type.RESTART) .filter(action -> action.clusterId() != null) .map(action -> action.clusterId()) .collect(Collectors.toSet()); for (var clusterToRestart : clustersToBeRestarted) { var containerCluster = model.getContainerClusters().get(clusterToRestart.value()); if (containerCluster != null) containerCluster.deferChangesUntilRestart(); var contentCluster = model.getContentClusters().get(clusterToRestart.value()); if (contentCluster != null) contentCluster.deferChangesUntilRestart(); } }
class Validation { /** * Validates the model supplied, and if there already exists a model for the application validates changes * between the previous and current model * * @return a list of required changes needed to make this configuration live */ public static List<ConfigChangeAction> validate(VespaModel model, ValidationParameters validationParameters, DeployState deployState) { if (validationParameters.checkRouting()) { new RoutingValidator().validate(model, deployState); new RoutingSelectorValidator().validate(model, deployState); } new ComponentValidator().validate(model, deployState); new SearchDataTypeValidator().validate(model, deployState); new ComplexAttributeFieldsValidator().validate(model, deployState); new StreamingValidator().validate(model, deployState); new RankSetupValidator(validationParameters.ignoreValidationErrors()).validate(model, deployState); new NoPrefixForIndexes().validate(model, deployState); new DeploymentSpecValidator().validate(model, deployState); new RankingConstantsValidator().validate(model, deployState); new SecretStoreValidator().validate(model, deployState); new EndpointCertificateSecretsValidator().validate(model, deployState); new AccessControlFilterValidator().validate(model, deployState); new CloudWatchValidator().validate(model, deployState); new AwsAccessControlValidator().validate(model, deployState); new QuotaValidator().validate(model, deployState); new UriBindingsValidator().validate(model, deployState); List<ConfigChangeAction> result = Collections.emptyList(); if (deployState.getProperties().isFirstTimeDeployment()) { validateFirstTimeDeployment(model, deployState); } else { Optional<Model> currentActiveModel = deployState.getPreviousModel(); if (currentActiveModel.isPresent() && (currentActiveModel.get() instanceof VespaModel)) { result = validateChanges((VespaModel) currentActiveModel.get(), model, deployState.validationOverrides(), deployState.getDeployLogger(), deployState.now()); 
deferConfigChangesForClustersToBeRestarted(result, model); } } return result; } private static List<ConfigChangeAction> validateChanges(VespaModel currentModel, VespaModel nextModel, ValidationOverrides overrides, DeployLogger logger, Instant now) { ChangeValidator[] validators = new ChangeValidator[] { new IndexingModeChangeValidator(), new GlobalDocumentChangeValidator(), new IndexedSearchClusterChangeValidator(), new StreamingSearchClusterChangeValidator(), new ConfigValueChangeValidator(logger), new StartupCommandChangeValidator(), new ContentTypeRemovalValidator(), new ContentClusterRemovalValidator(), new ClusterSizeReductionValidator(), new ResourcesReductionValidator(), new ContainerRestartValidator(), new NodeResourceChangeValidator() }; return Arrays.stream(validators) .flatMap(v -> v.validate(currentModel, nextModel, overrides, now).stream()) .collect(toList()); } private static void validateFirstTimeDeployment(VespaModel model, DeployState deployState) { new AccessControlOnFirstDeploymentValidator().validate(model, deployState); } }
class Validation { /** * Validates the model supplied, and if there already exists a model for the application validates changes * between the previous and current model * * @return a list of required changes needed to make this configuration live */ public static List<ConfigChangeAction> validate(VespaModel model, ValidationParameters validationParameters, DeployState deployState) { if (validationParameters.checkRouting()) { new RoutingValidator().validate(model, deployState); new RoutingSelectorValidator().validate(model, deployState); } new ComponentValidator().validate(model, deployState); new SearchDataTypeValidator().validate(model, deployState); new ComplexAttributeFieldsValidator().validate(model, deployState); new StreamingValidator().validate(model, deployState); new RankSetupValidator(validationParameters.ignoreValidationErrors()).validate(model, deployState); new NoPrefixForIndexes().validate(model, deployState); new DeploymentSpecValidator().validate(model, deployState); new RankingConstantsValidator().validate(model, deployState); new SecretStoreValidator().validate(model, deployState); new EndpointCertificateSecretsValidator().validate(model, deployState); new AccessControlFilterValidator().validate(model, deployState); new CloudWatchValidator().validate(model, deployState); new AwsAccessControlValidator().validate(model, deployState); new QuotaValidator().validate(model, deployState); new UriBindingsValidator().validate(model, deployState); List<ConfigChangeAction> result = Collections.emptyList(); if (deployState.getProperties().isFirstTimeDeployment()) { validateFirstTimeDeployment(model, deployState); } else { Optional<Model> currentActiveModel = deployState.getPreviousModel(); if (currentActiveModel.isPresent() && (currentActiveModel.get() instanceof VespaModel)) { result = validateChanges((VespaModel) currentActiveModel.get(), model, deployState.validationOverrides(), deployState.getDeployLogger(), deployState.now()); 
deferConfigChangesForClustersToBeRestarted(result, model); } } return result; } private static List<ConfigChangeAction> validateChanges(VespaModel currentModel, VespaModel nextModel, ValidationOverrides overrides, DeployLogger logger, Instant now) { ChangeValidator[] validators = new ChangeValidator[] { new IndexingModeChangeValidator(), new GlobalDocumentChangeValidator(), new IndexedSearchClusterChangeValidator(), new StreamingSearchClusterChangeValidator(), new ConfigValueChangeValidator(logger), new StartupCommandChangeValidator(), new ContentTypeRemovalValidator(), new ContentClusterRemovalValidator(), new ClusterSizeReductionValidator(), new ResourcesReductionValidator(), new ContainerRestartValidator(), new NodeResourceChangeValidator() }; return Arrays.stream(validators) .flatMap(v -> v.validate(currentModel, nextModel, overrides, now).stream()) .collect(toList()); } private static void validateFirstTimeDeployment(VespaModel model, DeployState deployState) { new AccessControlOnFirstDeploymentValidator().validate(model, deployState); } }
Yes, I forgot about that. Excellent catch!
private static void deferConfigChangesForClustersToBeRestarted(List<ConfigChangeAction> actions, VespaModel model) { Set<ClusterSpec.Id> clustersToBeRestarted = actions.stream() .filter(action -> action.getType() == ConfigChangeAction.Type.RESTART) .map(action -> action.clusterId()) .collect(Collectors.toSet()); for (var clusterToRestart : clustersToBeRestarted) { var containerCluster = model.getContainerClusters().get(clusterToRestart.value()); if (containerCluster != null) containerCluster.deferChangesUntilRestart(); var contentCluster = model.getContentClusters().get(clusterToRestart.value()); if (contentCluster != null) contentCluster.deferChangesUntilRestart(); } }
var containerCluster = model.getContainerClusters().get(clusterToRestart.value());
private static void deferConfigChangesForClustersToBeRestarted(List<ConfigChangeAction> actions, VespaModel model) { Set<ClusterSpec.Id> clustersToBeRestarted = actions.stream() .filter(action -> action.getType() == ConfigChangeAction.Type.RESTART) .filter(action -> action.clusterId() != null) .map(action -> action.clusterId()) .collect(Collectors.toSet()); for (var clusterToRestart : clustersToBeRestarted) { var containerCluster = model.getContainerClusters().get(clusterToRestart.value()); if (containerCluster != null) containerCluster.deferChangesUntilRestart(); var contentCluster = model.getContentClusters().get(clusterToRestart.value()); if (contentCluster != null) contentCluster.deferChangesUntilRestart(); } }
class Validation { /** * Validates the model supplied, and if there already exists a model for the application validates changes * between the previous and current model * * @return a list of required changes needed to make this configuration live */ public static List<ConfigChangeAction> validate(VespaModel model, ValidationParameters validationParameters, DeployState deployState) { if (validationParameters.checkRouting()) { new RoutingValidator().validate(model, deployState); new RoutingSelectorValidator().validate(model, deployState); } new ComponentValidator().validate(model, deployState); new SearchDataTypeValidator().validate(model, deployState); new ComplexAttributeFieldsValidator().validate(model, deployState); new StreamingValidator().validate(model, deployState); new RankSetupValidator(validationParameters.ignoreValidationErrors()).validate(model, deployState); new NoPrefixForIndexes().validate(model, deployState); new DeploymentSpecValidator().validate(model, deployState); new RankingConstantsValidator().validate(model, deployState); new SecretStoreValidator().validate(model, deployState); new EndpointCertificateSecretsValidator().validate(model, deployState); new AccessControlFilterValidator().validate(model, deployState); new CloudWatchValidator().validate(model, deployState); new AwsAccessControlValidator().validate(model, deployState); new QuotaValidator().validate(model, deployState); new UriBindingsValidator().validate(model, deployState); List<ConfigChangeAction> result = Collections.emptyList(); if (deployState.getProperties().isFirstTimeDeployment()) { validateFirstTimeDeployment(model, deployState); } else { Optional<Model> currentActiveModel = deployState.getPreviousModel(); if (currentActiveModel.isPresent() && (currentActiveModel.get() instanceof VespaModel)) { result = validateChanges((VespaModel) currentActiveModel.get(), model, deployState.validationOverrides(), deployState.getDeployLogger(), deployState.now()); 
deferConfigChangesForClustersToBeRestarted(result, model); } } return result; } private static List<ConfigChangeAction> validateChanges(VespaModel currentModel, VespaModel nextModel, ValidationOverrides overrides, DeployLogger logger, Instant now) { ChangeValidator[] validators = new ChangeValidator[] { new IndexingModeChangeValidator(), new GlobalDocumentChangeValidator(), new IndexedSearchClusterChangeValidator(), new StreamingSearchClusterChangeValidator(), new ConfigValueChangeValidator(logger), new StartupCommandChangeValidator(), new ContentTypeRemovalValidator(), new ContentClusterRemovalValidator(), new ClusterSizeReductionValidator(), new ResourcesReductionValidator(), new ContainerRestartValidator(), new NodeResourceChangeValidator() }; return Arrays.stream(validators) .flatMap(v -> v.validate(currentModel, nextModel, overrides, now).stream()) .collect(toList()); } private static void validateFirstTimeDeployment(VespaModel model, DeployState deployState) { new AccessControlOnFirstDeploymentValidator().validate(model, deployState); } }
class Validation { /** * Validates the model supplied, and if there already exists a model for the application validates changes * between the previous and current model * * @return a list of required changes needed to make this configuration live */ public static List<ConfigChangeAction> validate(VespaModel model, ValidationParameters validationParameters, DeployState deployState) { if (validationParameters.checkRouting()) { new RoutingValidator().validate(model, deployState); new RoutingSelectorValidator().validate(model, deployState); } new ComponentValidator().validate(model, deployState); new SearchDataTypeValidator().validate(model, deployState); new ComplexAttributeFieldsValidator().validate(model, deployState); new StreamingValidator().validate(model, deployState); new RankSetupValidator(validationParameters.ignoreValidationErrors()).validate(model, deployState); new NoPrefixForIndexes().validate(model, deployState); new DeploymentSpecValidator().validate(model, deployState); new RankingConstantsValidator().validate(model, deployState); new SecretStoreValidator().validate(model, deployState); new EndpointCertificateSecretsValidator().validate(model, deployState); new AccessControlFilterValidator().validate(model, deployState); new CloudWatchValidator().validate(model, deployState); new AwsAccessControlValidator().validate(model, deployState); new QuotaValidator().validate(model, deployState); new UriBindingsValidator().validate(model, deployState); List<ConfigChangeAction> result = Collections.emptyList(); if (deployState.getProperties().isFirstTimeDeployment()) { validateFirstTimeDeployment(model, deployState); } else { Optional<Model> currentActiveModel = deployState.getPreviousModel(); if (currentActiveModel.isPresent() && (currentActiveModel.get() instanceof VespaModel)) { result = validateChanges((VespaModel) currentActiveModel.get(), model, deployState.validationOverrides(), deployState.getDeployLogger(), deployState.now()); 
deferConfigChangesForClustersToBeRestarted(result, model); } } return result; } private static List<ConfigChangeAction> validateChanges(VespaModel currentModel, VespaModel nextModel, ValidationOverrides overrides, DeployLogger logger, Instant now) { ChangeValidator[] validators = new ChangeValidator[] { new IndexingModeChangeValidator(), new GlobalDocumentChangeValidator(), new IndexedSearchClusterChangeValidator(), new StreamingSearchClusterChangeValidator(), new ConfigValueChangeValidator(logger), new StartupCommandChangeValidator(), new ContentTypeRemovalValidator(), new ContentClusterRemovalValidator(), new ClusterSizeReductionValidator(), new ResourcesReductionValidator(), new ContainerRestartValidator(), new NodeResourceChangeValidator() }; return Arrays.stream(validators) .flatMap(v -> v.validate(currentModel, nextModel, overrides, now).stream()) .collect(toList()); } private static void validateFirstTimeDeployment(VespaModel model, DeployState deployState) { new AccessControlOnFirstDeploymentValidator().validate(model, deployState); } }
`enqueued` might be positive even when the actual queue is empty (when multiple threads executes `enqueueAndDispatch` simultaneously.
private void enqueueAndDispatch(HttpRequest request, ResponseHandler handler, Supplier<Operation> operationParser) { if (enqueued.incrementAndGet() > maxThrottled) { enqueued.decrementAndGet(); overload(request, "Rejecting execution due to overload: " + maxThrottled + " requests already enqueued", handler); return; } Operation operation = Operation.lazilyParsed(request, handler, operationParser); if (enqueued.get() == 1 && operation.dispatch()) enqueued.decrementAndGet(); else { operations.offer(operation); dispatchFirst(); } }
if (enqueued.get() == 1 && operation.dispatch())
private void enqueueAndDispatch(HttpRequest request, ResponseHandler handler, Supplier<Operation> operationParser) { if (enqueued.incrementAndGet() > maxThrottled) { enqueued.decrementAndGet(); overload(request, "Rejecting execution due to overload: " + maxThrottled + " requests already enqueued", handler); return; } operations.offer(Operation.lazilyParsed(request, handler, operationParser)); dispatchFirst(); }
class DocumentV1ApiHandler extends AbstractRequestHandler { private static final Logger log = Logger.getLogger(DocumentV1ApiHandler.class.getName()); private static final Parser<Integer> numberParser = Integer::parseInt; private static final Parser<Boolean> booleanParser = Boolean::parseBoolean; private static final CompletionHandler logException = new CompletionHandler() { @Override public void completed() { } @Override public void failed(Throwable t) { log.log(FINE, "Exception writing or closing response data", t); } }; private static final ContentChannel ignoredContent = new ContentChannel() { @Override public void write(ByteBuffer buf, CompletionHandler handler) { handler.completed(); } @Override public void close(CompletionHandler handler) { handler.completed(); } }; private static final JsonFactory jsonFactory = new JsonFactory(); private static final Duration requestTimeout = Duration.ofSeconds(175); private static final Duration visitTimeout = Duration.ofSeconds(120); private static final String CREATE = "create"; private static final String CONDITION = "condition"; private static final String ROUTE = "route"; private static final String FIELD_SET = "fieldSet"; private static final String SELECTION = "selection"; private static final String CLUSTER = "cluster"; private static final String CONTINUATION = "continuation"; private static final String WANTED_DOCUMENT_COUNT = "wantedDocumentCount"; private static final String CONCURRENCY = "concurrency"; private static final String BUCKET_SPACE = "bucketSpace"; private final Clock clock; private final Metric metric; private final DocumentApiMetrics metrics; private final DocumentOperationParser parser; private final long maxThrottled; private final DocumentAccess access; private final AsyncSession asyncSession; private final Map<String, StorageCluster> clusters; private final Deque<Operation> operations; private final AtomicLong enqueued = new AtomicLong(); private final Map<VisitorControlHandler, VisitorSession> 
visits = new ConcurrentHashMap<>(); private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("document-api-handler-")); private final Map<String, Map<Method, Handler>> handlers = defineApi(); @Inject public DocumentV1ApiHandler(Metric metric, MetricReceiver metricReceiver, VespaDocumentAccess documentAccess, DocumentmanagerConfig documentManagerConfig, ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig, DocumentOperationExecutorConfig executorConfig) { this(Clock.systemUTC(), metric, metricReceiver, documentAccess, documentManagerConfig, executorConfig, clusterListConfig, bucketSpacesConfig); } DocumentV1ApiHandler(Clock clock, Metric metric, MetricReceiver metricReceiver, DocumentAccess access, DocumentmanagerConfig documentmanagerConfig, DocumentOperationExecutorConfig executorConfig, ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig) { this.clock = clock; this.parser = new DocumentOperationParser(documentmanagerConfig); this.metric = metric; this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1"); this.maxThrottled = executorConfig.maxThrottled(); this.access = access; this.asyncSession = access.createAsyncSession(new AsyncParameters()); this.clusters = parseClusters(clusterListConfig, bucketSpacesConfig); this.operations = new ConcurrentLinkedDeque<>(); this.executor.scheduleWithFixedDelay(this::dispatchEnqueued, executorConfig.resendDelayMillis(), executorConfig.resendDelayMillis(), TimeUnit.MILLISECONDS); } DocumentV1ApiHandler(Clock clock, DocumentOperationParser parser, Metric metric, MetricReceiver metricReceiver, int maxThrottled, DocumentAccess access, Map<String, StorageCluster> clusters) { this.clock = clock; this.parser = parser; this.metric = metric; this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1"); this.maxThrottled = maxThrottled; this.access = access; this.asyncSession = 
access.createAsyncSession(new AsyncParameters()); this.clusters = clusters; this.operations = new ConcurrentLinkedDeque<>(); this.executor.scheduleWithFixedDelay(this::dispatchEnqueued, 10, 10, TimeUnit.MILLISECONDS); } @Override public ContentChannel handleRequest(Request rawRequest, ResponseHandler rawResponseHandler) { rawRequest.setTimeout(requestTimeout.toMillis(), TimeUnit.MILLISECONDS); HandlerMetricContextUtil.onHandle(rawRequest, metric, getClass()); ResponseHandler responseHandler = response -> { HandlerMetricContextUtil.onHandled(rawRequest, metric, getClass()); return rawResponseHandler.handleResponse(response); }; HttpRequest request = (HttpRequest) rawRequest; try { Path requestPath = new Path(request.getUri()); for (String path : handlers.keySet()) if (requestPath.matches(path)) { Map<Method, Handler> methods = handlers.get(path); if (methods.containsKey(request.getMethod())) return methods.get(request.getMethod()).handle(request, new DocumentPath(requestPath), responseHandler); if (request.getMethod() == OPTIONS) options(methods.keySet(), responseHandler); methodNotAllowed(request, methods.keySet(), responseHandler); } notFound(request, handlers.keySet(), responseHandler); } catch (IllegalArgumentException e) { badRequest(request, e, responseHandler); } catch (RuntimeException e) { serverError(request, e, responseHandler); } return ignoredContent; } @Override public void handleTimeout(Request request, ResponseHandler responseHandler) { timeout((HttpRequest) request, "Request timeout after " + requestTimeout, responseHandler); } @Override public void destroy() { executor.shutdown(); visits.values().forEach(VisitorSession::destroy); try { if ( ! executor.awaitTermination(10, TimeUnit.SECONDS)) { executor.shutdownNow(); if ( ! 
executor.awaitTermination(10, TimeUnit.SECONDS)) log.log(WARNING, "Failed shutting down /document/v1 executor within 20 seconds"); } } catch (InterruptedException e) { log.log(WARNING, "Interrupted waiting for /document/v1 executor to shut down"); } } @FunctionalInterface interface Handler { ContentChannel handle(HttpRequest request, DocumentPath path, ResponseHandler handler); } /** Defines all paths/methods handled by this handler. */ private Map<String, Map<Method, Handler>> defineApi() { Map<String, Map<Method, Handler>> handlers = new LinkedHashMap<>(); handlers.put("/document/v1/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/docid/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/", Map.of(GET, this::getDocuments)); handlers.put("/document/v1/{namespace}/{documentType}/docid/{docid}", Map.of(GET, this::getDocument, POST, this::postDocument, PUT, this::putDocument, DELETE, this::deleteDocument)); handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/{docid}", Map.of(GET, this::getDocument, POST, this::postDocument, PUT, this::putDocument, DELETE, this::deleteDocument)); handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/{docid}", Map.of(GET, this::getDocument, POST, this::postDocument, PUT, this::putDocument, DELETE, this::deleteDocument)); return Collections.unmodifiableMap(handlers); } private ContentChannel getDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) { enqueueAndDispatch(request, handler, () -> { VisitorParameters parameters = parseParameters(request, path); return () -> { visit(request, parameters, handler); return true; }; }); return ignoredContent; } private ContentChannel getDocument(HttpRequest request, DocumentPath path, ResponseHandler handler) { enqueueAndDispatch(request, handler, 
() -> { DocumentOperationParameters rawParameters = parameters(); rawParameters = getProperty(request, CLUSTER).map(cluster -> resolveCluster(Optional.of(cluster), clusters).route()) .map(rawParameters::withRoute) .orElse(rawParameters); rawParameters = getProperty(request, FIELD_SET).map(rawParameters::withFieldSet) .orElse(rawParameters); DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> { handle(path, handler, response, (document, jsonResponse) -> { if (document != null) { jsonResponse.writeSingleDocument(document); jsonResponse.commit(Response.Status.OK); } else jsonResponse.commit(Response.Status.NOT_FOUND); }); }); return () -> dispatchOperation(request, handler, () -> asyncSession.get(path.id(), parameters)); }); return ignoredContent; } private ContentChannel postDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) { ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.PUT, clock.instant()); return new ForwardingContentChannel(in -> { enqueueAndDispatch(request, handler, () -> { DocumentPut put = parser.parsePut(in, path.id().toString()); getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(put::setCondition); DocumentOperationParameters rawParameters = getProperty(request, ROUTE).map(parameters()::withRoute) .orElse(parameters()); DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> handle(path, handler, response)); return () -> dispatchOperation(request, handler, () -> asyncSession.put(put, parameters)); }); }); } private ContentChannel putDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) { ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.PUT, clock.instant()); return new ForwardingContentChannel(in -> { enqueueAndDispatch(request, handler, () -> { DocumentUpdate update = 
parser.parseUpdate(in, path.id().toString()); getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(update::setCondition); getProperty(request, CREATE).map(booleanParser::parse).ifPresent(update::setCreateIfNonExistent); DocumentOperationParameters rawParameters = getProperty(request, ROUTE).map(parameters()::withRoute) .orElse(parameters()); DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> handle(path, handler, response)); return () -> dispatchOperation(request, handler, () -> asyncSession.update(update, parameters)); }); }); } private ContentChannel deleteDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) { ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.REMOVE, clock.instant()); enqueueAndDispatch(request, handler, () -> { DocumentRemove remove = new DocumentRemove(path.id()); getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(remove::setCondition); DocumentOperationParameters rawParameters = getProperty(request, ROUTE).map(parameters()::withRoute) .orElse(parameters()); DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> handle(path, handler, response)); return () -> dispatchOperation(request, handler, () -> asyncSession.remove(remove, parameters)); }); return ignoredContent; } /** Dispatches enqueued requests until one is blocked. */ void dispatchEnqueued() { try { while (dispatchFirst()); } catch (Exception e) { log.log(WARNING, "Uncaught exception in /document/v1 dispatch thread", e); } } /** Attempts to dispatch the first enqueued operations, and returns whether this was successful. 
*/ private boolean dispatchFirst() { Operation operation = operations.poll(); if (operation == null) return false; if (operation.dispatch()) { enqueued.decrementAndGet(); return true; } operations.push(operation); return false; } /** * Enqueues the given request and operation, or responds with "overload" if the queue is full, * and then attempts to dispatch an enqueued operation from the head of the queue. */ /** Class for writing and returning JSON responses to document operations in a thread safe manner. */ private static class JsonResponse implements AutoCloseable { private final BufferedContentChannel buffer = new BufferedContentChannel(); private final OutputStream out = new ContentChannelOutputStream(buffer); private final JsonGenerator json = jsonFactory.createGenerator(out); private final ResponseHandler handler; private ContentChannel channel; private JsonResponse(ResponseHandler handler) throws IOException { this.handler = handler; json.writeStartObject(); } /** Creates a new JsonResponse with path and id fields written. */ static JsonResponse create(DocumentPath path, ResponseHandler handler) throws IOException { JsonResponse response = new JsonResponse(handler); response.writePathId(path.rawPath()); response.writeDocId(path.id()); return response; } /** Creates a new JsonResponse with path field written. */ static JsonResponse create(HttpRequest request, ResponseHandler handler) throws IOException { JsonResponse response = new JsonResponse(handler); response.writePathId(request.getUri().getRawPath()); return response; } /** Creates a new JsonResponse with path and message fields written. 
*/ static JsonResponse create(HttpRequest request, String message, ResponseHandler handler) throws IOException { JsonResponse response = new JsonResponse(handler); response.writePathId(request.getUri().getRawPath()); response.writeMessage(message); return response; } /** Commits a response with the given status code and some default headers, and writes whatever content is buffered. */ synchronized void commit(int status) throws IOException { Response response = new Response(status); response.headers().addAll(Map.of("Content-Type", List.of("application/json; charset=UTF-8"))); try { channel = handler.handleResponse(response); buffer.connectTo(channel); } catch (RuntimeException e) { throw new IOException(e); } } /** Commits a response with the given status code and some default headers, writes buffered content, and closes this. */ synchronized void respond(int status) throws IOException { try (this) { commit(status); } } /** Closes the JSON and the output content channel of this. */ @Override public synchronized void close() throws IOException { try { if (channel == null) { log.log(WARNING, "Close called before response was committed, in " + getClass().getName()); commit(Response.Status.INTERNAL_SERVER_ERROR); } json.close(); out.close(); } finally { if (channel != null) channel.close(logException); } } synchronized void writePathId(String path) throws IOException { json.writeStringField("pathId", path); } synchronized void writeMessage(String message) throws IOException { json.writeStringField("message", message); } synchronized void writeDocId(DocumentId id) throws IOException { json.writeStringField("id", id.toString()); } synchronized void writeSingleDocument(Document document) throws IOException { new JsonWriter(json).writeFields(document); } synchronized void writeDocumentsArrayStart() throws IOException { json.writeArrayFieldStart("documents"); } synchronized void writeDocumentValue(Document document) throws IOException { new JsonWriter(json).write(document); 
} // closes writeDocumentValue(Document), begun on the previous line

synchronized void writeArrayEnd() throws IOException {
    json.writeEndArray();
}

synchronized void writeContinuation(String token) throws IOException {
    json.writeStringField("continuation", token);
}

} // end of JsonResponse

/** Responds 204 No Content with an Allow header listing the methods supported at the requested path. */
private static void options(Collection<Method> methods, ResponseHandler handler) {
    loggingException(() -> {
        Response response = new Response(Response.Status.NO_CONTENT);
        response.headers().add("Allow", methods.stream().sorted().map(Method::name).collect(joining(",")));
        handler.handleResponse(response).close(logException);
    });
}

/** Responds 400 Bad Request with the message of the given exception. */
private static void badRequest(HttpRequest request, IllegalArgumentException e, ResponseHandler handler) {
    loggingException(() -> {
        String message = Exceptions.toMessageString(e);
        log.log(FINE, () -> "Bad request for " + request.getMethod() + " at " + request.getUri().getRawPath() + ": " + message);
        JsonResponse.create(request, message, handler).respond(Response.Status.BAD_REQUEST);
    });
}

/** Responds 404 Not Found, listing the paths this handler serves. */
private static void notFound(HttpRequest request, Collection<String> paths, ResponseHandler handler) {
    loggingException(() -> {
        JsonResponse.create(request,
                            "Nothing at '" + request.getUri().getRawPath() + "'. " +
                            "Available paths are:\n" + String.join("\n", paths),
                            handler)
                    .respond(Response.Status.NOT_FOUND);
    });
}

/** Responds 405 Method Not Allowed, listing the methods supported at the requested path. */
private static void methodNotAllowed(HttpRequest request, Collection<Method> methods, ResponseHandler handler) {
    loggingException(() -> {
        JsonResponse.create(request,
                            "'" + request.getMethod() + "' not allowed at '" + request.getUri().getRawPath() + "'. " +
                            "Allowed methods are: " + methods.stream().sorted().map(Method::name).collect(joining(", ")),
                            handler)
                    .respond(Response.Status.METHOD_NOT_ALLOWED);
    });
}

/** Responds 429 Too Many Requests with the given message. */
private static void overload(HttpRequest request, String message, ResponseHandler handler) {
    loggingException(() -> {
        log.log(FINE, () -> "Overload handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message);
        JsonResponse.create(request, message, handler).respond(Response.Status.TOO_MANY_REQUESTS);
    });
}

/** Responds 500 Internal Server Error for an unexpected failure, logging it at WARNING. */
private static void serverError(HttpRequest request, Throwable t, ResponseHandler handler) {
    loggingException(() -> {
        log.log(WARNING, "Uncaught exception handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ":", t);
        JsonResponse.create(request, Exceptions.toMessageString(t), handler).respond(Response.Status.INTERNAL_SERVER_ERROR);
    });
}

/** Responds 504 Gateway Timeout with the given message. */
private static void timeout(HttpRequest request, String message, ResponseHandler handler) {
    loggingException(() -> {
        log.log(FINE, () -> "Timeout handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message);
        JsonResponse.create(request, message, handler).respond(Response.Status.GATEWAY_TIMEOUT);
    });
}

/** Runs the given task, logging (at FINE) rather than propagating any exception — writing responses is best-effort. */
private static void loggingException(Exceptions.RunnableThrowingIOException runnable) {
    try {
        runnable.run();
    }
    catch (Exception e) {
        log.log(FINE, "Failed writing response", e);
    }
}

@FunctionalInterface
interface Operation {

    /**
     * Attempts to dispatch this operation to the document API, and returns whether this completed or not.
     * This returns {@code true} if dispatch was successful, or if it failed fatally; or {@code false} if
     * dispatch should be retried at a later time.
     */
    boolean dispatch();

    /** Wraps the operation parser in an Operation that is parsed the first time it is attempted dispatched.
*/
static Operation lazilyParsed(HttpRequest request, ResponseHandler handler, Supplier<Operation> parser) {
    AtomicReference<Operation> operation = new AtomicReference<>();
    return () -> {
        try {
            // Parse on first dispatch only; cache the parsed operation so retries don't parse again.
            return operation.updateAndGet(value -> value != null ? value : parser.get()).dispatch();
        }
        catch (IllegalArgumentException e) {
            badRequest(request, e, handler);
        }
        catch (RuntimeException e) {
            serverError(request, e, handler);
        }
        return true; // parse failures are fatal — report the operation as completed so it isn't retried
    };
}

} // end of Operation

/** Attempts to send the given document operation, returning false if this needs to be retried. */
private static boolean dispatchOperation(HttpRequest request, ResponseHandler handler, Supplier<Result> documentOperation) {
    if (request.isCancelled()) // client gave up — drop the operation
        return true;

    Result result = documentOperation.get();
    if (result.type() == Result.ResultType.TRANSIENT_ERROR) // e.g. session saturated — retry later
        return false;

    if (result.type() == Result.ResultType.FATAL_ERROR)
        serverError(request, result.getError(), handler);

    return true;
}

/** Readable content channel which forwards data to a reader when closed. */
static class ForwardingContentChannel implements ContentChannel {

    private final ReadableContentChannel delegate = new ReadableContentChannel();
    private final Consumer<InputStream> reader; // invoked once, with the complete body, on close()

    public ForwardingContentChannel(Consumer<InputStream> reader) {
        this.reader = reader;
    }

    /** Write is complete when we have stored the buffer — call completion handler. */
    @Override
    public void write(ByteBuffer buf, CompletionHandler handler) {
        try {
            delegate.write(buf, logException);
            handler.completed();
        }
        catch (Exception e) {
            handler.failed(e);
        }
    }

    /** Close is complete when we have closed the buffer.
     */
    @Override
    public void close(CompletionHandler handler) {
        try {
            delegate.close(logException);
            // The whole body is buffered now; hand the reader a stream over it.
            try (UnsafeContentInputStream in = new UnsafeContentInputStream(delegate)) {
                reader.accept(in);
            }
            handler.completed();
        }
        catch (Exception e) {
            handler.failed(e);
        }
    }

}

/** Parses single-document puts and updates from JSON, using the document types of the given config. */
static class DocumentOperationParser {

    private final DocumentTypeManager manager;

    DocumentOperationParser(DocumentmanagerConfig config) {
        this.manager = new DocumentTypeManager(config);
    }

    DocumentPut parsePut(InputStream inputStream, String docId) {
        return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT);
    }

    DocumentUpdate parseUpdate(InputStream inputStream, String docId) {
        return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE);
    }

    private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) {
        return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId);
    }

}

/** Callback invoked with the response's document (null if it carried none) on a successful document API response. */
interface SuccessCallback {
    void onSuccess(Document document, JsonResponse response) throws IOException;
}

/** Translates a document API response into an HTTP response, delegating successes to the given callback. */
private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response, SuccessCallback callback) {
    try (JsonResponse jsonResponse = JsonResponse.create(path, handler)) {
        if (response.isSuccess())
            callback.onSuccess((response instanceof DocumentResponse) ?
((DocumentResponse) response).getDocument() : null, jsonResponse); // completes the onSuccess(...) call begun above
        else {
            jsonResponse.writeMessage(response.getTextMessage());
            switch (response.outcome()) {
                case NOT_FOUND:
                    jsonResponse.commit(Response.Status.NOT_FOUND);
                    break;
                case CONDITION_FAILED:
                    jsonResponse.commit(Response.Status.PRECONDITION_FAILED);
                    break;
                case INSUFFICIENT_STORAGE:
                    log.log(WARNING, "Insufficient storage left in cluster: " + response.getTextMessage());
                    jsonResponse.commit(Response.Status.INSUFFICIENT_STORAGE);
                    break;
                default:
                    log.log(WARNING, "Unexpected document API operation outcome '" + response.outcome() + "'");
                    // Intentional fall-through: unknown outcomes are treated like ERROR.
                case ERROR:
                    log.log(WARNING, "Exception performing document operation: " + response.getTextMessage());
                    jsonResponse.commit(Response.Status.INTERNAL_SERVER_ERROR);
            }
        }
    }
    catch (Exception e) {
        log.log(FINE, "Failed writing response", e);
    }
}

/** Translates a document API response into an HTTP response, committing 200 OK on success. */
private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response) {
    handle(path, handler, response, (document, jsonResponse) -> jsonResponse.commit(Response.Status.OK));
}

/**
 * Builds visitor parameters from the request and path.
 *
 * @throws IllegalArgumentException on out-of-range counts, or a missing/unknown cluster
 */
private VisitorParameters parseParameters(HttpRequest request, DocumentPath path) {
    int wantedDocumentCount = Math.min(1 << 10, getProperty(request, WANTED_DOCUMENT_COUNT, numberParser).orElse(1 << 10)); // capped at 1024
    if (wantedDocumentCount <= 0)
        throw new IllegalArgumentException("wantedDocumentCount must be positive");

    int concurrency = Math.min(100, getProperty(request, CONCURRENCY, numberParser).orElse(1)); // capped at 100
    if (concurrency <= 0)
        throw new IllegalArgumentException("concurrency must be positive");

    Optional<String> cluster = getProperty(request, CLUSTER);
    if (cluster.isEmpty() && path.documentType().isEmpty())
        throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level");

    // Combine the user's selection with path-derived constraints, as "(a) and (b) and ..." — empty if no parts.
    VisitorParameters parameters = new VisitorParameters(Stream.of(getProperty(request, SELECTION),
                                                                   path.documentType(),
                                                                   path.namespace().map(value -> "id.namespace=='" + value + "'"),
                                                                   path.group().map(Group::selection))
                                                               .flatMap(Optional::stream)
                                                               .reduce(new StringJoiner(") and (", "(", ")").setEmptyValue(""),
                                                                       StringJoiner::add,
                                                                       StringJoiner::merge)
                                                               .toString());

    getProperty(request, CONTINUATION).map(ProgressToken::fromSerializedString).ifPresent(parameters::setResumeToken);
    parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME)));
    parameters.setMaxTotalHits(wantedDocumentCount);
    parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(concurrency));
    parameters.setTimeoutMs(visitTimeout.toMillis());
    parameters.visitInconsistentBuckets(true);
    parameters.setPriority(DocumentProtocol.Priority.NORMAL_4);

    StorageCluster storageCluster = resolveCluster(cluster, clusters);
    parameters.setRoute(storageCluster.route());
    parameters.setBucketSpace(resolveBucket(storageCluster,
                                            path.documentType(),
                                            List.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()),
                                            getProperty(request, BUCKET_SPACE)));
    return parameters;
}

/** Streams matching documents as a JSON array, ending with a continuation token when the visit is unfinished. */
private void visit(HttpRequest request, VisitorParameters parameters, ResponseHandler handler) {
    try {
        JsonResponse response = JsonResponse.create(request, handler);
        response.writeDocumentsArrayStart();
        CountDownLatch latch = new CountDownLatch(1); // released once the session is registered in "visits" below
        parameters.setLocalDataHandler(new DumpVisitorDataHandler() {
            @Override public void onDocument(Document doc, long timeStamp) {
                loggingException(() -> {
                    response.writeDocumentValue(doc);
                });
            }
            @Override public void onRemove(DocumentId id) { } // removes are not exposed by this API
        });
        parameters.setControlHandler(new VisitorControlHandler() {
            @Override public void onDone(CompletionCode code, String message) {
                super.onDone(code, message);
                loggingException(() -> {
                    response.writeArrayEnd();
                    switch (code) {
                        case TIMEOUT:
                            if ( ! hasVisitedAnyBuckets()) {
                                response.writeMessage("No buckets visited within timeout of " + visitTimeout);
                                response.respond(Response.Status.GATEWAY_TIMEOUT);
                                break;
                            }
                            // Intentional fall-through: a timeout after some progress is reported as 200 OK, with a continuation.
                        case SUCCESS:
                        case ABORTED:
                            if (getProgress() != null && ! getProgress().isFinished())
                                response.writeContinuation(getProgress().serializeToString());

                            response.respond(Response.Status.OK);
                            break;
                        default:
                            response.writeMessage(message != null ? message : "Visiting failed");
                            response.respond(Response.Status.INTERNAL_SERVER_ERROR);
                    }
                    // Destroy the session on the executor, only after it has been registered in "visits" (see latch).
                    executor.execute(() -> {
                        try {
                            latch.await();
                        }
                        catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                        }
                        visits.get(this).destroy();
                    });
                });
            }
        });
        visits.put(parameters.getControlHandler(), access.createVisitorSession(parameters));
        latch.countDown();
    }
    catch (IllegalArgumentException e) {
        badRequest(request, e, handler);
    }
    catch (ParseException e) {
        badRequest(request, new IllegalArgumentException(e), handler);
    }
    catch (RuntimeException e) {
        serverError(request, e, handler);
    }
    catch (Exception e) {
        log.log(FINE, "Failed writing response", e);
    }
}

/** Immutable value object holding all the visit options a request may specify. */
static class VisitorOptions {

    final Optional<String> cluster;
    final Optional<String> namespace;
    final Optional<String> documentType;
    final Optional<Group> group;
    final Optional<String> selection;
    final Optional<String> fieldSet;
    final Optional<String> continuation;
    final Optional<String> bucketSpace;
    final Optional<Integer> wantedDocumentCount;
    final Optional<Integer> concurrency;

    // NOTE(review): parameter order is (cluster, documentType, namespace, ...) while the fields are declared
    // (cluster, namespace, documentType, ...); Builder.build() passes arguments in the constructor's order.
    private VisitorOptions(Optional<String> cluster, Optional<String> documentType, Optional<String> namespace,
                           Optional<Group> group, Optional<String> selection, Optional<String> fieldSet,
                           Optional<String> continuation, Optional<String> bucketSpace,
                           Optional<Integer> wantedDocumentCount, Optional<Integer> concurrency) {
        this.cluster = cluster;
        this.namespace = namespace;
        this.documentType = documentType;
        this.group = group;
        this.selection = selection;
        this.fieldSet = fieldSet;
        this.continuation = continuation;
        this.bucketSpace = bucketSpace;
this.wantedDocumentCount = wantedDocumentCount; // continues the VisitorOptions constructor begun above
        this.concurrency = concurrency;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        VisitorOptions that = (VisitorOptions) o;
        return cluster.equals(that.cluster) &&
               namespace.equals(that.namespace) &&
               documentType.equals(that.documentType) &&
               group.equals(that.group) &&
               selection.equals(that.selection) &&
               fieldSet.equals(that.fieldSet) &&
               continuation.equals(that.continuation) &&
               bucketSpace.equals(that.bucketSpace) &&
               wantedDocumentCount.equals(that.wantedDocumentCount) &&
               concurrency.equals(that.concurrency);
    }

    @Override
    public int hashCode() {
        return Objects.hash(cluster, namespace, documentType, group, selection, fieldSet,
                            continuation, bucketSpace, wantedDocumentCount, concurrency);
    }

    @Override
    public String toString() {
        return "VisitorOptions{" +
               "cluster=" + cluster +
               ", namespace=" + namespace +
               ", documentType=" + documentType +
               ", group=" + group +
               ", selection=" + selection +
               ", fieldSet=" + fieldSet +
               ", continuation=" + continuation +
               ", bucketSpace=" + bucketSpace +
               ", wantedDocumentCount=" + wantedDocumentCount +
               ", concurrency=" + concurrency +
               '}';
    }

    public static Builder builder() { return new Builder(); }

    /** Fluent builder for VisitorOptions; unset values become Optional.empty() in the built object. */
    public static class Builder {

        private String cluster;
        private String documentType;
        private String namespace;
        private Group group;
        private String selection;
        private String fieldSet;
        private String continuation;
        private String bucketSpace;
        private Integer wantedDocumentCount;
        private Integer concurrency;

        public Builder cluster(String cluster) {
            this.cluster = cluster;
            return this;
        }

        public Builder documentType(String documentType) {
            this.documentType = documentType;
            return this;
        }

        public Builder namespace(String namespace) {
            this.namespace = namespace;
            return this;
        }

        public Builder group(Group group) {
            this.group = group;
            return this;
        }

        public Builder selection(String selection) {
            this.selection = selection;
            return this;
        }

        public Builder fieldSet(String fieldSet) {
            this.fieldSet = fieldSet;
            return this;
        }

        public Builder continuation(String continuation) {
            this.continuation = continuation;
            return this;
        }

        public Builder bucketSpace(String bucketSpace) {
            this.bucketSpace = bucketSpace;
            return this;
        }

        public Builder wantedDocumentCount(Integer wantedDocumentCount) {
            this.wantedDocumentCount = wantedDocumentCount;
            return this;
        }

        public Builder concurrency(Integer concurrency) {
            this.concurrency = concurrency;
            return this;
        }

        public VisitorOptions build() {
            return new VisitorOptions(Optional.ofNullable(cluster), Optional.ofNullable(documentType),
                                      Optional.ofNullable(namespace), Optional.ofNullable(group),
                                      Optional.ofNullable(selection), Optional.ofNullable(fieldSet),
                                      Optional.ofNullable(continuation), Optional.ofNullable(bucketSpace),
                                      Optional.ofNullable(wantedDocumentCount), Optional.ofNullable(concurrency));
        }

    }

}

/** Returns the last value of the named request parameter, if any. */
private static Optional<String> getProperty(HttpRequest request, String name) {
    List<String> values = request.parameters().get(name);
    if (values != null && values.size() != 0)
        return Optional.ofNullable(values.get(values.size() - 1));

    return Optional.empty();
}

/** Returns the last value of the named request parameter, parsed with the given parser. */
private static <T> Optional<T> getProperty(HttpRequest request, String name, Parser<T> parser) {
    return getProperty(request, name).map(parser::parse);
}

/** A String-to-T function which wraps parse failures as IllegalArgumentException, yielding 400 responses upstream. */
@FunctionalInterface
interface Parser<T> extends Function<String, T> {
    default T parse(String value) {
        try {
            return apply(value);
        }
        catch (RuntimeException e) {
            throw new IllegalArgumentException("Failed parsing '" + value + "': " + Exceptions.toMessageString(e));
        }
    }
}

/** Response handler wrapper which records success/failure metrics for one document operation. */
private class MeasuringResponseHandler implements ResponseHandler {

    private final ResponseHandler delegate;
    private final com.yahoo.documentapi.metrics.DocumentOperationType type;
    private final Instant start; // when the operation was received, for latency metrics

    private MeasuringResponseHandler(ResponseHandler delegate,
                                     com.yahoo.documentapi.metrics.DocumentOperationType type,
                                     Instant start) {
        this.delegate = delegate;
        this.type = type;
        this.start = start;
} // closes the MeasuringResponseHandler constructor begun above

    @Override
    public ContentChannel handleResponse(Response response) {
        // Record metrics keyed on status class; 1xx/3xx responses are not recorded — TODO confirm that is intended.
        switch (response.getStatus() / 100) {
            case 2: metrics.reportSuccessful(type, start); break;
            case 4: metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR); break;
            case 5: metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR); break;
        }
        return delegate.handleResponse(response);
    }

}

/** A content cluster, with the bucket space of each document type it holds. */
static class StorageCluster {

    private final String name;
    private final String configId;
    private final Map<String, String> documentBuckets; // document type -> bucket space

    StorageCluster(String name, String configId, Map<String, String> documentBuckets) {
        this.name = requireNonNull(name);
        this.configId = requireNonNull(configId);
        this.documentBuckets = Map.copyOf(documentBuckets); // defensive, immutable copy
    }

    String name() { return name; }
    String configId() { return configId; }
    String route() { return "[Storage:cluster=" + name() + ";clusterconfigid=" + configId() + "]"; }
    Optional<String> bucketOf(String documentType) { return Optional.ofNullable(documentBuckets.get(documentType)); }

}

/** Builds the cluster-name-to-StorageCluster map from the cluster list and bucket space configs. */
private static Map<String, StorageCluster> parseClusters(ClusterListConfig clusters, AllClustersBucketSpacesConfig buckets) {
    return clusters.storage().stream()
                   .collect(toUnmodifiableMap(storage -> storage.name(),
                                              storage -> new StorageCluster(storage.name(),
                                                                            storage.configid(),
                                                                            buckets.cluster(storage.name())
                                                                                   .documentType().entrySet().stream()
                                                                                   .collect(toMap(entry -> entry.getKey(),
                                                                                                  entry -> entry.getValue().bucketSpace())))));
}

/**
 * Resolves the cluster to use: the named one when given, or the single configured one.
 *
 * @throws IllegalArgumentException when no clusters exist, the named cluster is unknown,
 *                                  or no name was given and several clusters exist
 */
static StorageCluster resolveCluster(Optional<String> wanted, Map<String, StorageCluster> clusters) {
    if (clusters.isEmpty())
        throw new IllegalArgumentException("Your Vespa deployment has no content clusters, so the document API is not enabled");

    return wanted.map(cluster -> {
        if ( ! clusters.containsKey(cluster))
            throw new IllegalArgumentException("Your Vespa deployment has no content cluster '" + cluster + "', only '" +
                                               String.join("', '", clusters.keySet()) + "'");

        return clusters.get(cluster);
    }).orElseGet(() -> {
        if (clusters.size() > 1)
            throw new IllegalArgumentException("Please specify one of the content clusters in your Vespa deployment: '" +
                                               String.join("', '", clusters.keySet()) + "'");

        return clusters.values().iterator().next();
    });
}

/**
 * Resolves the bucket space to visit: the document type's configured space when a type is given,
 * else the explicitly requested space (validated against the known ones), else the default space.
 */
static String resolveBucket(StorageCluster cluster, Optional<String> documentType,
                            List<String> bucketSpaces, Optional<String> bucketSpace) {
    return documentType.map(type -> cluster.bucketOf(type)
                                           .orElseThrow(() -> new IllegalArgumentException("Document type '" + type + "' in cluster '" + cluster.name() +
                                                                                           "' is not mapped to a known bucket space")))
                       .or(() -> bucketSpace.map(space -> {
                           if ( ! bucketSpaces.contains(space))
                               throw new IllegalArgumentException("Bucket space '" + space + "' is not a known bucket space; expected one of " +
                                                                  String.join(", ", bucketSpaces));

                           return space;
                       }))
                       .orElse(FixedBucketSpaces.defaultSpace());
}

/** The parts of a /document/v1 request path: namespace, document type, optional group/number, and docid. */
private static class DocumentPath {

    private final Path path;
    private final Optional<Group> group; // from the "number" or "group" path segment, when present

    DocumentPath(Path path) {
        this.path = requireNonNull(path);
        this.group = Optional.ofNullable(path.get("number")).map(numberParser::parse).map(Group::of)
                             .or(() -> Optional.ofNullable(path.get("group")).map(Group::of));
    }

    /** Assembles the full document id from the path segments; requires namespace, documentType and docid. */
    DocumentId id() {
        return new DocumentId("id:" + requireNonNull(path.get("namespace")) +
                              ":" + requireNonNull(path.get("documentType")) +
                              ":" + group.map(Group::docIdPart).orElse("") +
                              ":" + requireNonNull(path.get("docid")));
    }

    String rawPath() { return path.asString(); }
    Optional<String> documentType() { return Optional.ofNullable(path.get("documentType")); }
    Optional<String> namespace() { return Optional.ofNullable(path.get("namespace")); }
    Optional<Group> group() { return group; }

}

/** A number (user) or string (group) document id constraint, with its doc id part and selection expression. */
static class Group {

    private final String value;
    private final String docIdPart;
    private final String selection;

    private Group(String value, String docIdPart, String selection) {
        Text.validateTextString(value)
            .ifPresent(codePoint -> { throw new IllegalArgumentException(String.format("Illegal code point U%04X in group", codePoint)); });
        this.value = value;
        this.docIdPart = docIdPart;
        this.selection = selection;
    }

    public static Group of(long value) { return new Group(Long.toString(value), "n=" + value, "id.user==" + value); }
    // Single quotes in the group value are escaped for the selection expression.
    public static Group of(String value) { return new Group(value, "g=" + value, "id.group=='" + value.replaceAll("'", "\\\\'") + "'"); }

    public String value() { return value; }
    public String docIdPart() { return docIdPart; }
    public String selection() { return selection; }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Group group = (Group) o;
        return value.equals(group.value) &&
               docIdPart.equals(group.docIdPart) &&
               selection.equals(group.selection);
    }

    @Override
    public int hashCode() {
        return Objects.hash(value, docIdPart, selection);
    }

    @Override
    public String toString() {
        return "Group{" +
               "value='" + value + '\'' +
               ", docIdPart='" + docIdPart + '\'' +
               ", selection='" + selection + '\'' +
               '}';
    }

}

}
class DocumentV1ApiHandler extends AbstractRequestHandler {

    private static final Logger log = Logger.getLogger(DocumentV1ApiHandler.class.getName());

    private static final Parser<Integer> numberParser = Integer::parseInt;
    private static final Parser<Boolean> booleanParser = Boolean::parseBoolean;

    // Completion handler which only logs failures, for fire-and-forget channel operations.
    private static final CompletionHandler logException = new CompletionHandler() {
        @Override public void completed() { }
        @Override public void failed(Throwable t) { log.log(FINE, "Exception writing or closing response data", t); }
    };

    // Content channel which accepts and discards all input — returned when the request body is irrelevant.
    private static final ContentChannel ignoredContent = new ContentChannel() {
        @Override public void write(ByteBuffer buf, CompletionHandler handler) { handler.completed(); }
        @Override public void close(CompletionHandler handler) { handler.completed(); }
    };

    private static final JsonFactory jsonFactory = new JsonFactory();

    private static final Duration requestTimeout = Duration.ofSeconds(175);
    private static final Duration visitTimeout = Duration.ofSeconds(120);

    // Request parameter names.
    private static final String CREATE = "create";
    private static final String CONDITION = "condition";
    private static final String ROUTE = "route";
    private static final String FIELD_SET = "fieldSet";
    private static final String SELECTION = "selection";
    private static final String CLUSTER = "cluster";
    private static final String CONTINUATION = "continuation";
    private static final String WANTED_DOCUMENT_COUNT = "wantedDocumentCount";
    private static final String CONCURRENCY = "concurrency";
    private static final String BUCKET_SPACE = "bucketSpace";

    private final Clock clock;
    private final Metric metric;
    private final DocumentApiMetrics metrics;
    private final DocumentOperationParser parser;
    private final long maxThrottled; // presumably caps the operation queue — TODO confirm against enqueueAndDispatch
    private final DocumentAccess access;
    private final AsyncSession asyncSession;
    private final Map<String, StorageCluster> clusters;
    private final Deque<Operation> operations; // operations awaiting (re)dispatch; retried from the head
    private final AtomicLong enqueued = new AtomicLong();
    private final Map<VisitorControlHandler, VisitorSession>
            visits = new ConcurrentHashMap<>(); // live visitor sessions, destroyed on completion and on destroy()
    private final ScheduledExecutorService executor =
            Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("document-api-handler-"));
    private final Map<String, Map<Method, Handler>> handlers = defineApi();

    @Inject
    public DocumentV1ApiHandler(Metric metric,
                                MetricReceiver metricReceiver,
                                VespaDocumentAccess documentAccess,
                                DocumentmanagerConfig documentManagerConfig,
                                ClusterListConfig clusterListConfig,
                                AllClustersBucketSpacesConfig bucketSpacesConfig,
                                DocumentOperationExecutorConfig executorConfig) {
        this(Clock.systemUTC(), metric, metricReceiver, documentAccess, documentManagerConfig, executorConfig,
             clusterListConfig, bucketSpacesConfig);
    }

    DocumentV1ApiHandler(Clock clock, Metric metric, MetricReceiver metricReceiver, DocumentAccess access,
                         DocumentmanagerConfig documentmanagerConfig, DocumentOperationExecutorConfig executorConfig,
                         ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig) {
        this.clock = clock;
        this.parser = new DocumentOperationParser(documentmanagerConfig);
        this.metric = metric;
        this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1");
        this.maxThrottled = executorConfig.maxThrottled();
        this.access = access;
        this.asyncSession = access.createAsyncSession(new AsyncParameters());
        this.clusters = parseClusters(clusterListConfig, bucketSpacesConfig);
        this.operations = new ConcurrentLinkedDeque<>();
        // Periodically retry operations that could not be dispatched immediately.
        this.executor.scheduleWithFixedDelay(this::dispatchEnqueued,
                                             executorConfig.resendDelayMillis(),
                                             executorConfig.resendDelayMillis(),
                                             TimeUnit.MILLISECONDS);
    }

    // Presumably a test constructor (fixed 10 ms resend delay, pre-built parser and clusters) — confirm with callers.
    DocumentV1ApiHandler(Clock clock, DocumentOperationParser parser, Metric metric, MetricReceiver metricReceiver,
                         int maxThrottled, DocumentAccess access, Map<String, StorageCluster> clusters) {
        this.clock = clock;
        this.parser = parser;
        this.metric = metric;
        this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1");
        this.maxThrottled = maxThrottled;
        this.access = access;
        this.asyncSession = access.createAsyncSession(new AsyncParameters());
        this.clusters = clusters;
        this.operations = new ConcurrentLinkedDeque<>();
        this.executor.scheduleWithFixedDelay(this::dispatchEnqueued, 10, 10, TimeUnit.MILLISECONDS);
    }

    @Override
    public ContentChannel handleRequest(Request rawRequest, ResponseHandler rawResponseHandler) {
        rawRequest.setTimeout(requestTimeout.toMillis(), TimeUnit.MILLISECONDS);
        HandlerMetricContextUtil.onHandle(rawRequest, metric, getClass());
        // Wrap the handler so "handled" is recorded when the response is produced.
        ResponseHandler responseHandler = response -> {
            HandlerMetricContextUtil.onHandled(rawRequest, metric, getClass());
            return rawResponseHandler.handleResponse(response);
        };
        HttpRequest request = (HttpRequest) rawRequest;
        try {
            Path requestPath = new Path(request.getUri());
            for (String path : handlers.keySet())
                if (requestPath.matches(path)) {
                    Map<Method, Handler> methods = handlers.get(path);
                    if (methods.containsKey(request.getMethod()))
                        return methods.get(request.getMethod()).handle(request, new DocumentPath(requestPath), responseHandler);

                    if (request.getMethod() == OPTIONS)
                        options(methods.keySet(), responseHandler);

                    // NOTE(review): there is no return after options(...) above or methodNotAllowed(...) below,
                    // so control also reaches notFound(...) after the loop — confirm this is the intended behavior.
                    methodNotAllowed(request, methods.keySet(), responseHandler);
                }
            notFound(request, handlers.keySet(), responseHandler);
        }
        catch (IllegalArgumentException e) {
            badRequest(request, e, responseHandler);
        }
        catch (RuntimeException e) {
            serverError(request, e, responseHandler);
        }
        return ignoredContent;
    }

    @Override
    public void handleTimeout(Request request, ResponseHandler responseHandler) {
        timeout((HttpRequest) request, "Request timeout after " + requestTimeout, responseHandler);
    }

    @Override
    public void destroy() {
        executor.shutdown();
        visits.values().forEach(VisitorSession::destroy);
        try {
            // Two-phase shutdown: a graceful 10 s wait, then shutdownNow() and another 10 s wait.
            if ( ! executor.awaitTermination(10, TimeUnit.SECONDS)) {
                executor.shutdownNow();
                if ( !
executor.awaitTermination(10, TimeUnit.SECONDS)) // completes destroy(): second wait, after shutdownNow()
                log.log(WARNING, "Failed shutting down /document/v1 executor within 20 seconds");
        }
    }
    catch (InterruptedException e) {
        log.log(WARNING, "Interrupted waiting for /document/v1 executor to shut down");
    }
}

/** A request handler for a resolved document path. */
@FunctionalInterface
interface Handler {
    ContentChannel handle(HttpRequest request, DocumentPath path, ResponseHandler handler);
}

/** Defines all paths/methods handled by this handler. */
private Map<String, Map<Method, Handler>> defineApi() {
    Map<String, Map<Method, Handler>> handlers = new LinkedHashMap<>(); // iteration order is declaration order
    handlers.put("/document/v1/",
                 Map.of(GET, this::getDocuments));
    handlers.put("/document/v1/{namespace}/{documentType}/docid/",
                 Map.of(GET, this::getDocuments));
    handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/",
                 Map.of(GET, this::getDocuments));
    handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/",
                 Map.of(GET, this::getDocuments));
    handlers.put("/document/v1/{namespace}/{documentType}/docid/{docid}",
                 Map.of(GET, this::getDocument,
                        POST, this::postDocument,
                        PUT, this::putDocument,
                        DELETE, this::deleteDocument));
    handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/{docid}",
                 Map.of(GET, this::getDocument,
                        POST, this::postDocument,
                        PUT, this::putDocument,
                        DELETE, this::deleteDocument));
    handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/{docid}",
                 Map.of(GET, this::getDocument,
                        POST, this::postDocument,
                        PUT, this::putDocument,
                        DELETE, this::deleteDocument));
    return Collections.unmodifiableMap(handlers);
}

/** GET at a root/type/group/number level: visits documents matching the request's selection. */
private ContentChannel getDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) {
    enqueueAndDispatch(request, handler, () -> {
        VisitorParameters parameters = parseParameters(request, path);
        return () -> {
            visit(request, parameters, handler);
            return true;
        };
    });
    return ignoredContent;
}

/** GET of a single document: responds with its fields, or 404 when it doesn't exist. */
private ContentChannel getDocument(HttpRequest request, DocumentPath path, ResponseHandler handler) {
    enqueueAndDispatch(request, handler,
() -> { // continues the enqueueAndDispatch(...) call begun above
        DocumentOperationParameters rawParameters = parameters();
        rawParameters = getProperty(request, CLUSTER).map(cluster -> resolveCluster(Optional.of(cluster), clusters).route())
                                                     .map(rawParameters::withRoute)
                                                     .orElse(rawParameters);
        rawParameters = getProperty(request, FIELD_SET).map(rawParameters::withFieldSet)
                                                       .orElse(rawParameters);
        DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> {
            handle(path, handler, response, (document, jsonResponse) -> {
                if (document != null) {
                    jsonResponse.writeSingleDocument(document);
                    jsonResponse.commit(Response.Status.OK);
                }
                else
                    jsonResponse.commit(Response.Status.NOT_FOUND);
            });
        });
        return () -> dispatchOperation(request, handler, () -> asyncSession.get(path.id(), parameters));
    });
    return ignoredContent;
}

/** POST of a document: a put, optionally conditional on the "condition" parameter. */
private ContentChannel postDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) {
    ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.PUT, clock.instant());
    return new ForwardingContentChannel(in -> { // the body is parsed only once it is completely received
        enqueueAndDispatch(request, handler, () -> {
            DocumentPut put = parser.parsePut(in, path.id().toString());
            getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(put::setCondition);
            DocumentOperationParameters rawParameters = getProperty(request, ROUTE).map(parameters()::withRoute)
                                                                                  .orElse(parameters());
            DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> handle(path, handler, response));
            return () -> dispatchOperation(request, handler, () -> asyncSession.put(put, parameters));
        });
    });
}

/** PUT of a document: an update, optionally conditional, optionally creating the document if it doesn't exist. */
private ContentChannel putDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) {
    ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.UPDATE, clock.instant());
    return new ForwardingContentChannel(in -> {
        enqueueAndDispatch(request, handler, () -> {
            DocumentUpdate update =
parser.parseUpdate(in, path.id().toString()); // completes the assignment begun above
            getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(update::setCondition);
            getProperty(request, CREATE).map(booleanParser::parse).ifPresent(update::setCreateIfNonExistent);
            DocumentOperationParameters rawParameters = getProperty(request, ROUTE).map(parameters()::withRoute)
                                                                                  .orElse(parameters());
            DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> handle(path, handler, response));
            return () -> dispatchOperation(request, handler, () -> asyncSession.update(update, parameters));
        });
    });
}

/** DELETE of a document: a remove, optionally conditional on the "condition" parameter. */
private ContentChannel deleteDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) {
    ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.REMOVE, clock.instant());
    enqueueAndDispatch(request, handler, () -> {
        DocumentRemove remove = new DocumentRemove(path.id());
        getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(remove::setCondition);
        DocumentOperationParameters rawParameters = getProperty(request, ROUTE).map(parameters()::withRoute)
                                                                              .orElse(parameters());
        DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> handle(path, handler, response));
        return () -> dispatchOperation(request, handler, () -> asyncSession.remove(remove, parameters));
    });
    return ignoredContent;
}

/** Dispatches enqueued requests until one is blocked. */
void dispatchEnqueued() {
    try {
        while (dispatchFirst()); // keep going until the queue is empty or the head is blocked
    }
    catch (Exception e) {
        log.log(WARNING, "Uncaught exception in /document/v1 dispatch thread", e);
    }
}

/** Attempts to dispatch the first enqueued operations, and returns whether this was successful.
*/
private boolean dispatchFirst() {
    Operation operation = operations.poll(); // take from the head of the queue
    if (operation == null)
        return false; // nothing enqueued

    if (operation.dispatch()) {
        enqueued.decrementAndGet();
        return true; // completed — caller may try the next one
    }
    operations.push(operation); // still blocked — put it back at the head, preserving order
    return false;
}

/**
 * Enqueues the given request and operation, or responds with "overload" if the queue is full,
 * and then attempts to dispatch an enqueued operation from the head of the queue.
 */
// NOTE(review): the method this Javadoc describes (enqueueAndDispatch, used throughout above) does not
// appear directly below it here — verify the declaration was not lost in a merge.
/** Class for writing and returning JSON responses to document operations in a thread safe manner. */
private static class JsonResponse implements AutoCloseable {

    private final BufferedContentChannel buffer = new BufferedContentChannel(); // buffers output until commit()
    private final OutputStream out = new ContentChannelOutputStream(buffer);
    private final JsonGenerator json = jsonFactory.createGenerator(out);
    private final ResponseHandler handler;
    private ContentChannel channel; // set by commit(); null until then

    private JsonResponse(ResponseHandler handler) throws IOException {
        this.handler = handler;
        json.writeStartObject();
    }

    /** Creates a new JsonResponse with path and id fields written. */
    static JsonResponse create(DocumentPath path, ResponseHandler handler) throws IOException {
        JsonResponse response = new JsonResponse(handler);
        response.writePathId(path.rawPath());
        response.writeDocId(path.id());
        return response;
    }

    /** Creates a new JsonResponse with path field written. */
    static JsonResponse create(HttpRequest request, ResponseHandler handler) throws IOException {
        JsonResponse response = new JsonResponse(handler);
        response.writePathId(request.getUri().getRawPath());
        return response;
    }

    /** Creates a new JsonResponse with path and message fields written.
     */
    static JsonResponse create(HttpRequest request, String message, ResponseHandler handler) throws IOException {
        JsonResponse response = new JsonResponse(handler);
        response.writePathId(request.getUri().getRawPath());
        response.writeMessage(message);
        return response;
    }

    /** Commits a response with the given status code and some default headers, and writes whatever content is buffered. */
    synchronized void commit(int status) throws IOException {
        Response response = new Response(status);
        response.headers().addAll(Map.of("Content-Type", List.of("application/json; charset=UTF-8")));
        try {
            channel = handler.handleResponse(response);
            buffer.connectTo(channel); // flush everything buffered before commit
        }
        catch (RuntimeException e) {
            throw new IOException(e);
        }
    }

    /** Commits a response with the given status code and some default headers, writes buffered content, and closes this. */
    synchronized void respond(int status) throws IOException {
        try (this) { // try-with-resources on "this": close() runs after commit
            commit(status);
        }
    }

    /** Closes the JSON and the output content channel of this. */
    @Override
    public synchronized void close() throws IOException {
        try {
            if (channel == null) { // close before commit is a programming error — respond 500
                log.log(WARNING, "Close called before response was committed, in " + getClass().getName());
                commit(Response.Status.INTERNAL_SERVER_ERROR);
            }
            json.close();
            out.close();
        }
        finally {
            if (channel != null) channel.close(logException);
        }
    }

    // Synchronized field writers, so concurrent threads can safely share one response object.

    synchronized void writePathId(String path) throws IOException {
        json.writeStringField("pathId", path);
    }

    synchronized void writeMessage(String message) throws IOException {
        json.writeStringField("message", message);
    }

    synchronized void writeDocId(DocumentId id) throws IOException {
        json.writeStringField("id", id.toString());
    }

    synchronized void writeSingleDocument(Document document) throws IOException {
        new JsonWriter(json).writeFields(document); // fields only — pathId/id are written separately
    }

    synchronized void writeDocumentsArrayStart() throws IOException {
        json.writeArrayFieldStart("documents");
    }

    synchronized void writeDocumentValue(Document document) throws IOException {
        new JsonWriter(json).write(document);
} synchronized void writeArrayEnd() throws IOException { json.writeEndArray(); } synchronized void writeContinuation(String token) throws IOException { json.writeStringField("continuation", token); } } private static void options(Collection<Method> methods, ResponseHandler handler) { loggingException(() -> { Response response = new Response(Response.Status.NO_CONTENT); response.headers().add("Allow", methods.stream().sorted().map(Method::name).collect(joining(","))); handler.handleResponse(response).close(logException); }); } private static void badRequest(HttpRequest request, IllegalArgumentException e, ResponseHandler handler) { loggingException(() -> { String message = Exceptions.toMessageString(e); log.log(FINE, () -> "Bad request for " + request.getMethod() + " at " + request.getUri().getRawPath() + ": " + message); JsonResponse.create(request, message, handler).respond(Response.Status.BAD_REQUEST); }); } private static void notFound(HttpRequest request, Collection<String> paths, ResponseHandler handler) { loggingException(() -> { JsonResponse.create(request, "Nothing at '" + request.getUri().getRawPath() + "'. " + "Available paths are:\n" + String.join("\n", paths), handler) .respond(Response.Status.NOT_FOUND); }); } private static void methodNotAllowed(HttpRequest request, Collection<Method> methods, ResponseHandler handler) { loggingException(() -> { JsonResponse.create(request, "'" + request.getMethod() + "' not allowed at '" + request.getUri().getRawPath() + "'. 
" + "Allowed methods are: " + methods.stream().sorted().map(Method::name).collect(joining(", ")), handler) .respond(Response.Status.METHOD_NOT_ALLOWED); }); } private static void overload(HttpRequest request, String message, ResponseHandler handler) { loggingException(() -> { log.log(FINE, () -> "Overload handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message); JsonResponse.create(request, message, handler).respond(Response.Status.TOO_MANY_REQUESTS); }); } private static void serverError(HttpRequest request, Throwable t, ResponseHandler handler) { loggingException(() -> { log.log(WARNING, "Uncaught exception handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ":", t); JsonResponse.create(request, Exceptions.toMessageString(t), handler).respond(Response.Status.INTERNAL_SERVER_ERROR); }); } private static void timeout(HttpRequest request, String message, ResponseHandler handler) { loggingException(() -> { log.log(FINE, () -> "Timeout handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message); JsonResponse.create(request, message, handler).respond(Response.Status.GATEWAY_TIMEOUT); }); } private static void loggingException(Exceptions.RunnableThrowingIOException runnable) { try { runnable.run(); } catch (Exception e) { log.log(FINE, "Failed writing response", e); } } @FunctionalInterface interface Operation { /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ boolean dispatch(); /** Wraps the operation parser in an Operation that is parsed the first time it is attempted dispatched. 
*/ static Operation lazilyParsed(HttpRequest request, ResponseHandler handler, Supplier<Operation> parser) { AtomicReference<Operation> operation = new AtomicReference<>(); return () -> { try { return operation.updateAndGet(value -> value != null ? value : parser.get()).dispatch(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } return true; }; } } /** Attempts to send the given document operation, returning false if thes needs to be retried. */ private static boolean dispatchOperation(HttpRequest request, ResponseHandler handler, Supplier<Result> documentOperation) { if (request.isCancelled()) return true; Result result = documentOperation.get(); if (result.type() == Result.ResultType.TRANSIENT_ERROR) return false; if (result.type() == Result.ResultType.FATAL_ERROR) serverError(request, result.getError(), handler); return true; } /** Readable content channel which forwards data to a reader when closed. */ static class ForwardingContentChannel implements ContentChannel { private final ReadableContentChannel delegate = new ReadableContentChannel(); private final Consumer<InputStream> reader; public ForwardingContentChannel(Consumer<InputStream> reader) { this.reader = reader; } /** Write is complete when we have stored the buffer — call completion handler. */ @Override public void write(ByteBuffer buf, CompletionHandler handler) { try { delegate.write(buf, logException); handler.completed(); } catch (Exception e) { handler.failed(e); } } /** Close is complete when we have close the buffer. 
*/ @Override public void close(CompletionHandler handler) { try { delegate.close(logException); try (UnsafeContentInputStream in = new UnsafeContentInputStream(delegate)) { reader.accept(in); } handler.completed(); } catch (Exception e) { handler.failed(e); } } } static class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } DocumentPut parsePut(InputStream inputStream, String docId) { return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT); } DocumentUpdate parseUpdate(InputStream inputStream, String docId) { return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE); } private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } } interface SuccessCallback { void onSuccess(Document document, JsonResponse response) throws IOException; } private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response, SuccessCallback callback) { try (JsonResponse jsonResponse = JsonResponse.create(path, handler)) { if (response.isSuccess()) callback.onSuccess((response instanceof DocumentResponse) ? 
((DocumentResponse) response).getDocument() : null, jsonResponse); else { jsonResponse.writeMessage(response.getTextMessage()); switch (response.outcome()) { case NOT_FOUND: jsonResponse.commit(Response.Status.NOT_FOUND); break; case CONDITION_FAILED: jsonResponse.commit(Response.Status.PRECONDITION_FAILED); break; case INSUFFICIENT_STORAGE: log.log(WARNING, "Insufficient storage left in cluster: " + response.getTextMessage()); jsonResponse.commit(Response.Status.INSUFFICIENT_STORAGE); break; default: log.log(WARNING, "Unexpected document API operation outcome '" + response.outcome() + "'"); case ERROR: log.log(WARNING, "Exception performing document operation: " + response.getTextMessage()); jsonResponse.commit(Response.Status.INTERNAL_SERVER_ERROR); } } } catch (Exception e) { log.log(FINE, "Failed writing response", e); } } private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response) { handle(path, handler, response, (document, jsonResponse) -> jsonResponse.commit(Response.Status.OK)); } private VisitorParameters parseParameters(HttpRequest request, DocumentPath path) { int wantedDocumentCount = Math.min(1 << 10, getProperty(request, WANTED_DOCUMENT_COUNT, numberParser).orElse(1)); if (wantedDocumentCount <= 0) throw new IllegalArgumentException("wantedDocumentCount must be positive"); int concurrency = Math.min(100, getProperty(request, CONCURRENCY, numberParser).orElse(1)); if (concurrency <= 0) throw new IllegalArgumentException("concurrency must be positive"); Optional<String> cluster = getProperty(request, CLUSTER); if (cluster.isEmpty() && path.documentType().isEmpty()) throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level"); VisitorParameters parameters = new VisitorParameters(Stream.of(getProperty(request, SELECTION), path.documentType(), path.namespace().map(value -> "id.namespace=='" + value + "'"), 
path.group().map(Group::selection)) .flatMap(Optional::stream) .reduce(new StringJoiner(") and (", "(", ")").setEmptyValue(""), StringJoiner::add, StringJoiner::merge) .toString()); getProperty(request, CONTINUATION).map(ProgressToken::fromSerializedString).ifPresent(parameters::setResumeToken); parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME))); parameters.setMaxTotalHits(wantedDocumentCount); parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(concurrency)); parameters.setTimeoutMs(visitTimeout.toMillis()); parameters.visitInconsistentBuckets(true); parameters.setPriority(DocumentProtocol.Priority.NORMAL_4); StorageCluster storageCluster = resolveCluster(cluster, clusters); parameters.setRoute(storageCluster.route()); parameters.setBucketSpace(resolveBucket(storageCluster, path.documentType(), List.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()), getProperty(request, BUCKET_SPACE))); return parameters; } private void visit(HttpRequest request, VisitorParameters parameters, ResponseHandler handler) { try { JsonResponse response = JsonResponse.create(request, handler); response.writeDocumentsArrayStart(); CountDownLatch latch = new CountDownLatch(1); parameters.setLocalDataHandler(new DumpVisitorDataHandler() { @Override public void onDocument(Document doc, long timeStamp) { loggingException(() -> { response.writeDocumentValue(doc); }); } @Override public void onRemove(DocumentId id) { } }); parameters.setControlHandler(new VisitorControlHandler() { @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); loggingException(() -> { response.writeArrayEnd(); switch (code) { case TIMEOUT: if ( ! 
hasVisitedAnyBuckets()) { response.writeMessage("No buckets visited within timeout of " + visitTimeout); response.respond(Response.Status.GATEWAY_TIMEOUT); break; } case SUCCESS: case ABORTED: if (getProgress() != null && ! getProgress().isFinished()) response.writeContinuation(getProgress().serializeToString()); response.respond(Response.Status.OK); break; default: response.writeMessage(message != null ? message : "Visiting failed"); response.respond(Response.Status.INTERNAL_SERVER_ERROR); } executor.execute(() -> { try { latch.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } visits.get(this).destroy(); }); }); } }); visits.put(parameters.getControlHandler(), access.createVisitorSession(parameters)); latch.countDown(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (ParseException e) { badRequest(request, new IllegalArgumentException(e), handler); } catch (RuntimeException e) { serverError(request, e, handler); } catch (Exception e) { log.log(FINE, "Failed writing response", e); } } private static Optional<String> getProperty(HttpRequest request, String name) { if ( ! 
request.parameters().containsKey(name)) return Optional.empty(); List<String> values = request.parameters().get(name); String value; if (values == null || values.isEmpty() || (value = values.get(values.size() - 1)) == null || value.isEmpty()) throw new IllegalArgumentException("Expected non-empty value for request property '" + name + "'"); return Optional.of(value); } private static <T> Optional<T> getProperty(HttpRequest request, String name, Parser<T> parser) { return getProperty(request, name).map(parser::parse); } @FunctionalInterface interface Parser<T> extends Function<String, T> { default T parse(String value) { try { return apply(value); } catch (RuntimeException e) { throw new IllegalArgumentException("Failed parsing '" + value + "': " + Exceptions.toMessageString(e)); } } } private class MeasuringResponseHandler implements ResponseHandler { private final ResponseHandler delegate; private final com.yahoo.documentapi.metrics.DocumentOperationType type; private final Instant start; private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) { this.delegate = delegate; this.type = type; this.start = start; } @Override public ContentChannel handleResponse(Response response) { switch (response.getStatus() / 100) { case 2: metrics.reportSuccessful(type, start); break; case 4: metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR); break; case 5: metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR); break; } return delegate.handleResponse(response); } } static class StorageCluster { private final String name; private final String configId; private final Map<String, String> documentBuckets; StorageCluster(String name, String configId, Map<String, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String configId() { return configId; 
} String route() { return "[Storage:cluster=" + name() + ";clusterconfigid=" + configId() + "]"; } Optional<String> bucketOf(String documentType) { return Optional.ofNullable(documentBuckets.get(documentType)); } } private static Map<String, StorageCluster> parseClusters(ClusterListConfig clusters, AllClustersBucketSpacesConfig buckets) { return clusters.storage().stream() .collect(toUnmodifiableMap(storage -> storage.name(), storage -> new StorageCluster(storage.name(), storage.configid(), buckets.cluster(storage.name()) .documentType().entrySet().stream() .collect(toMap(entry -> entry.getKey(), entry -> entry.getValue().bucketSpace()))))); } static StorageCluster resolveCluster(Optional<String> wanted, Map<String, StorageCluster> clusters) { if (clusters.isEmpty()) throw new IllegalArgumentException("Your Vespa deployment has no content clusters, so the document API is not enabled"); return wanted.map(cluster -> { if ( ! clusters.containsKey(cluster)) throw new IllegalArgumentException("Your Vespa deployment has no content cluster '" + cluster + "', only '" + String.join("', '", clusters.keySet()) + "'"); return clusters.get(cluster); }).orElseGet(() -> { if (clusters.size() > 1) throw new IllegalArgumentException("Please specify one of the content clusters in your Vespa deployment: '" + String.join("', '", clusters.keySet()) + "'"); return clusters.values().iterator().next(); }); } static String resolveBucket(StorageCluster cluster, Optional<String> documentType, List<String> bucketSpaces, Optional<String> bucketSpace) { return documentType.map(type -> cluster.bucketOf(type) .orElseThrow(() -> new IllegalArgumentException("Document type '" + type + "' in cluster '" + cluster.name() + "' is not mapped to a known bucket space"))) .or(() -> bucketSpace.map(space -> { if ( ! 
bucketSpaces.contains(space)) throw new IllegalArgumentException("Bucket space '" + space + "' is not a known bucket space; expected one of " + String.join(", ", bucketSpaces)); return space; })) .orElse(FixedBucketSpaces.defaultSpace()); } private static class DocumentPath { private final Path path; private final Optional<Group> group; DocumentPath(Path path) { this.path = requireNonNull(path); this.group = Optional.ofNullable(path.get("number")).map(numberParser::parse).map(Group::of) .or(() -> Optional.ofNullable(path.get("group")).map(Group::of)); } DocumentId id() { return new DocumentId("id:" + requireNonNull(path.get("namespace")) + ":" + requireNonNull(path.get("documentType")) + ":" + group.map(Group::docIdPart).orElse("") + ":" + requireNonNull(path.get("docid"))); } String rawPath() { return path.asString(); } Optional<String> documentType() { return Optional.ofNullable(path.get("documentType")); } Optional<String> namespace() { return Optional.ofNullable(path.get("namespace")); } Optional<Group> group() { return group; } } static class Group { private final String value; private final String docIdPart; private final String selection; private Group(String value, String docIdPart, String selection) { Text.validateTextString(value) .ifPresent(codePoint -> { throw new IllegalArgumentException(String.format("Illegal code point U%04X in group", codePoint)); }); this.value = value; this.docIdPart = docIdPart; this.selection = selection; } public static Group of(long value) { return new Group(Long.toString(value), "n=" + value, "id.user==" + value); } public static Group of(String value) { return new Group(value, "g=" + value, "id.group=='" + value.replaceAll("'", "\\\\'") + "'"); } public String value() { return value; } public String docIdPart() { return docIdPart; } public String selection() { return selection; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Group group = 
(Group) o; return value.equals(group.value) && docIdPart.equals(group.docIdPart) && selection.equals(group.selection); } @Override public int hashCode() { return Objects.hash(value, docIdPart, selection); } @Override public String toString() { return "Group{" + "value='" + value + '\'' + ", docIdPart='" + docIdPart + '\'' + ", selection='" + selection + '\'' + '}'; } } }
This shouldn't happen, so throw on it. It would cause multiple operations to be sent to the backend for a single request. Can guard here against buggy usage of this class.
boolean dispatch() { if ( ! lock.tryLock()) throw new IllegalStateException("Comcurrent attempts at dispatch — this is a bug"); try { if (operation == null) operation = parse(); return operation.get(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } finally { lock.unlock(); } return true; }
throw new IllegalStateException("Comcurrent attempts at dispatch — this is a bug");
boolean dispatch() { if (request.isCancelled()) return true; if ( ! lock.tryLock()) throw new IllegalStateException("Comcurrent attempts at dispatch — this is a bug"); try { if (operation == null) operation = parse(); return operation.get(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } finally { lock.unlock(); } return true; }
class Operation { private final Lock lock = new ReentrantLock(); private final HttpRequest request; private final ResponseHandler handler; private Supplier<Boolean> operation; Operation(HttpRequest request, ResponseHandler handler) { this.request = request; this.handler = handler; } /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ abstract Supplier<Boolean> parse(); }
class Operation { private final Lock lock = new ReentrantLock(); private final HttpRequest request; private final ResponseHandler handler; private Supplier<Boolean> operation; Operation(HttpRequest request, ResponseHandler handler) { this.request = request; this.handler = handler; } /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ abstract Supplier<Boolean> parse(); }
Old code stupidly tried to parse every time ... ヽ( ಠ益ಠ )ノ
boolean dispatch() { if ( ! lock.tryLock()) throw new IllegalStateException("Comcurrent attempts at dispatch — this is a bug"); try { if (operation == null) operation = parse(); return operation.get(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } finally { lock.unlock(); } return true; }
operation = parse();
boolean dispatch() { if (request.isCancelled()) return true; if ( ! lock.tryLock()) throw new IllegalStateException("Comcurrent attempts at dispatch — this is a bug"); try { if (operation == null) operation = parse(); return operation.get(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } finally { lock.unlock(); } return true; }
class Operation { private final Lock lock = new ReentrantLock(); private final HttpRequest request; private final ResponseHandler handler; private Supplier<Boolean> operation; Operation(HttpRequest request, ResponseHandler handler) { this.request = request; this.handler = handler; } /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ abstract Supplier<Boolean> parse(); }
class Operation { private final Lock lock = new ReentrantLock(); private final HttpRequest request; private final ResponseHandler handler; private Supplier<Boolean> operation; Operation(HttpRequest request, ResponseHandler handler) { this.request = request; this.handler = handler; } /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ abstract Supplier<Boolean> parse(); }
Exception handling now run for all operations — not just the lazily parsed ones.
boolean dispatch() { if ( ! lock.tryLock()) throw new IllegalStateException("Comcurrent attempts at dispatch — this is a bug"); try { if (operation == null) operation = parse(); return operation.get(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } finally { lock.unlock(); } return true; }
}
boolean dispatch() { if (request.isCancelled()) return true; if ( ! lock.tryLock()) throw new IllegalStateException("Comcurrent attempts at dispatch — this is a bug"); try { if (operation == null) operation = parse(); return operation.get(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } finally { lock.unlock(); } return true; }
class Operation { private final Lock lock = new ReentrantLock(); private final HttpRequest request; private final ResponseHandler handler; private Supplier<Boolean> operation; Operation(HttpRequest request, ResponseHandler handler) { this.request = request; this.handler = handler; } /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ abstract Supplier<Boolean> parse(); }
class Operation { private final Lock lock = new ReentrantLock(); private final HttpRequest request; private final ResponseHandler handler; private Supplier<Boolean> operation; Operation(HttpRequest request, ResponseHandler handler) { this.request = request; this.handler = handler; } /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ abstract Supplier<Boolean> parse(); }
This is now also run for all operations — not just those against the async-session.
boolean dispatch() { if (request.isCancelled()) return true; if ( ! lock.tryLock()) throw new IllegalStateException("Comcurrent attempts at dispatch — this is a bug"); try { if (operation == null) operation = parse(); return operation.get(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } finally { lock.unlock(); } return true; }
if (request.isCancelled())
boolean dispatch() { if (request.isCancelled()) return true; if ( ! lock.tryLock()) throw new IllegalStateException("Comcurrent attempts at dispatch — this is a bug"); try { if (operation == null) operation = parse(); return operation.get(); } catch (IllegalArgumentException e) { badRequest(request, e, handler); } catch (RuntimeException e) { serverError(request, e, handler); } finally { lock.unlock(); } return true; }
class Operation { private final Lock lock = new ReentrantLock(); private final HttpRequest request; private final ResponseHandler handler; private Supplier<Boolean> operation; Operation(HttpRequest request, ResponseHandler handler) { this.request = request; this.handler = handler; } /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ abstract Supplier<Boolean> parse(); }
class Operation { private final Lock lock = new ReentrantLock(); private final HttpRequest request; private final ResponseHandler handler; private Supplier<Boolean> operation; Operation(HttpRequest request, ResponseHandler handler) { this.request = request; this.handler = handler; } /** * Attempts to dispatch this operation to the document API, and returns whether this completed or not. * This return {@code} true if dispatch was successful, or if it failed fatally; or {@code false} if * dispatch should be retried at a later time. */ abstract Supplier<Boolean> parse(); }
```suggestion // TODO(bjorncs,jonmv) Cleanup once old restapi handler is gone ```
private static void addRestApiHandler(ContainerCluster<?> cluster, Options options) { String oldHandlerName = "com.yahoo.document.restapi.resource.RestApi"; String bindingSuffix = "/document/v1/*"; var oldHandler = newVespaClientHandler(oldHandlerName, options.useNewRestapiHandler ? null : bindingSuffix, options); cluster.addComponent(oldHandler); var executor = new Threadpool("restapi-handler", cluster, options.restApiThreadpoolOptions, options.feedThreadPoolSizeFactor); oldHandler.inject(executor); oldHandler.addComponent(executor); if (options.useNewRestapiHandler) { String newHandlerName = "com.yahoo.document.restapi.resource.DocumentV1ApiHandler"; var newHandler = newVespaClientHandler(newHandlerName, bindingSuffix, options); cluster.addComponent(newHandler); } }
private static void addRestApiHandler(ContainerCluster<?> cluster, Options options) { String oldHandlerName = "com.yahoo.document.restapi.resource.RestApi"; String bindingSuffix = "/document/v1/*"; var oldHandler = newVespaClientHandler(oldHandlerName, options.useNewRestapiHandler ? null : bindingSuffix, options); cluster.addComponent(oldHandler); var executor = new Threadpool("restapi-handler", cluster, options.restApiThreadpoolOptions, options.feedThreadPoolSizeFactor); oldHandler.inject(executor); oldHandler.addComponent(executor); if (options.useNewRestapiHandler) { String newHandlerName = "com.yahoo.document.restapi.resource.DocumentV1ApiHandler"; var newHandler = newVespaClientHandler(newHandlerName, bindingSuffix, options); cluster.addComponent(newHandler); } }
class ContainerDocumentApi { private static final int FALLBACK_MAX_POOL_SIZE = 0; private static final int FALLBACK_CORE_POOL_SIZE = 0; public ContainerDocumentApi(ContainerCluster<?> cluster, Options options) { addRestApiHandler(cluster, options); addFeedHandler(cluster, options); } private static void addFeedHandler(ContainerCluster<?> cluster, Options options) { String bindingSuffix = ContainerCluster.RESERVED_URI_PREFIX + "/feedapi"; var handler = newVespaClientHandler( "com.yahoo.vespa.http.server.FeedHandler", bindingSuffix, options); cluster.addComponent(handler); var executor = new Threadpool( "feedapi-handler", cluster, options.feedApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } private static Handler<AbstractConfigProducer<?>> newVespaClientHandler( String componentId, String bindingSuffix, Options options) { Handler<AbstractConfigProducer<?>> handler = new Handler<>(new ComponentModel( BundleInstantiationSpecification.getFromStrings(componentId, null, "vespaclient-container-plugin"), "")); if (bindingSuffix == null) return handler; if (options.bindings.isEmpty()) { handler.addServerBindings( SystemBindingPattern.fromHttpPath(bindingSuffix), SystemBindingPattern.fromHttpPath(bindingSuffix + '/')); } else { for (String rootBinding : options.bindings) { String pathWithoutLeadingSlash = bindingSuffix.substring(1); handler.addServerBindings( UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash), UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash + '/')); } } return handler; } public static final class Options { private final Collection<String> bindings; private final ContainerThreadpool.UserOptions restApiThreadpoolOptions; private final ContainerThreadpool.UserOptions feedApiThreadpoolOptions; private final double feedThreadPoolSizeFactor; private final boolean useNewRestapiHandler; public Options(Collection<String> bindings, ContainerThreadpool.UserOptions 
restApiThreadpoolOptions, ContainerThreadpool.UserOptions feedApiThreadpoolOptions, double feedThreadPoolSizeFactor, boolean useNewRestapiHandler) { this.bindings = Collections.unmodifiableCollection(bindings); this.restApiThreadpoolOptions = restApiThreadpoolOptions; this.feedApiThreadpoolOptions = feedApiThreadpoolOptions; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; this.useNewRestapiHandler = useNewRestapiHandler; } } private static class Threadpool extends ContainerThreadpool { private final ContainerCluster<?> cluster; private final double feedThreadPoolSizeFactor; Threadpool(String name, ContainerCluster<?> cluster, ContainerThreadpool.UserOptions threadpoolOptions, double feedThreadPoolSizeFactor ) { super(name, threadpoolOptions); this.cluster = cluster; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; } @Override public void getConfig(ContainerThreadpoolConfig.Builder builder) { super.getConfig(builder); if (hasUserOptions()) return; builder.maxThreads(maxPoolSize()); builder.minThreads(minPoolSize()); builder.queueSize(500); } private int maxPoolSize() { double vcpu = vcpu(cluster).orElse(0); if (vcpu == 0) return FALLBACK_MAX_POOL_SIZE; return Math.max(2, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor)); } private int minPoolSize() { double vcpu = vcpu(cluster).orElse(0); if (vcpu == 0) return FALLBACK_CORE_POOL_SIZE; return Math.max(1, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor * 0.5)); } } }
class ContainerDocumentApi { private static final int FALLBACK_MAX_POOL_SIZE = 0; private static final int FALLBACK_CORE_POOL_SIZE = 0; public ContainerDocumentApi(ContainerCluster<?> cluster, Options options) { addRestApiHandler(cluster, options); addFeedHandler(cluster, options); } private static void addFeedHandler(ContainerCluster<?> cluster, Options options) { String bindingSuffix = ContainerCluster.RESERVED_URI_PREFIX + "/feedapi"; var handler = newVespaClientHandler( "com.yahoo.vespa.http.server.FeedHandler", bindingSuffix, options); cluster.addComponent(handler); var executor = new Threadpool( "feedapi-handler", cluster, options.feedApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } private static Handler<AbstractConfigProducer<?>> newVespaClientHandler( String componentId, String bindingSuffix, Options options) { Handler<AbstractConfigProducer<?>> handler = new Handler<>(new ComponentModel( BundleInstantiationSpecification.getFromStrings(componentId, null, "vespaclient-container-plugin"), "")); if (bindingSuffix == null) return handler; if (options.bindings.isEmpty()) { handler.addServerBindings( SystemBindingPattern.fromHttpPath(bindingSuffix), SystemBindingPattern.fromHttpPath(bindingSuffix + '/')); } else { for (String rootBinding : options.bindings) { String pathWithoutLeadingSlash = bindingSuffix.substring(1); handler.addServerBindings( UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash), UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash + '/')); } } return handler; } public static final class Options { private final Collection<String> bindings; private final ContainerThreadpool.UserOptions restApiThreadpoolOptions; private final ContainerThreadpool.UserOptions feedApiThreadpoolOptions; private final double feedThreadPoolSizeFactor; private final boolean useNewRestapiHandler; public Options(Collection<String> bindings, ContainerThreadpool.UserOptions 
restApiThreadpoolOptions, ContainerThreadpool.UserOptions feedApiThreadpoolOptions, double feedThreadPoolSizeFactor, boolean useNewRestapiHandler) { this.bindings = Collections.unmodifiableCollection(bindings); this.restApiThreadpoolOptions = restApiThreadpoolOptions; this.feedApiThreadpoolOptions = feedApiThreadpoolOptions; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; this.useNewRestapiHandler = useNewRestapiHandler; } } private static class Threadpool extends ContainerThreadpool { private final ContainerCluster<?> cluster; private final double feedThreadPoolSizeFactor; Threadpool(String name, ContainerCluster<?> cluster, ContainerThreadpool.UserOptions threadpoolOptions, double feedThreadPoolSizeFactor ) { super(name, threadpoolOptions); this.cluster = cluster; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; } @Override public void getConfig(ContainerThreadpoolConfig.Builder builder) { super.getConfig(builder); if (hasUserOptions()) return; builder.maxThreads(maxPoolSize()); builder.minThreads(minPoolSize()); builder.queueSize(500); } private int maxPoolSize() { double vcpu = vcpu(cluster).orElse(0); if (vcpu == 0) return FALLBACK_MAX_POOL_SIZE; return Math.max(2, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor)); } private int minPoolSize() { double vcpu = vcpu(cluster).orElse(0); if (vcpu == 0) return FALLBACK_CORE_POOL_SIZE; return Math.max(1, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor * 0.5)); } } }
Perhaps rename this method as well, to be in line with others? `deleteSessionFromFileSystem`?
public void delete(Session session) { long sessionId = session.getSessionId(); deleteSessionFromZooKeeper(session); sessionCache.remove(sessionId); deleteLocalSession(session); }
deleteLocalSession(session);
public void delete(Session session) { long sessionId = session.getSessionId(); deleteSessionFromZooKeeper(session); sessionCache.remove(sessionId); deleteLocalSession(session); }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Map<Long, Session> sessionCache = new ConcurrentHashMap<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); loadAll(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); 
this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadAll() { loadSessionsFromFileSystem(); loadSessions(); } public synchronized void addSession(Session session) { long sessionId = session.getSessionId(); sessionCache.put(sessionId, session); } public Collection<Session> getSessions() { return sessionCache.values(); } private void loadSessionsFromFileSystem() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { createSessionFromId(Long.parseLong(session.getName())); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } /** * Creates a new deployment session from an already existing session. 
* * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. * @return a new session */ public Session createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); Session session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. 
* @return a new session */ public Session createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } public void deleteLocalSession(Session session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); sessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<Session> sessions = new ArrayList<>(sessionCache.values()); for (Session session : sessions) { deleteLocalSession(session); } } public Session getSession(long sessionId) { return sessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public synchronized Session createSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); Session session = new Session(tenantName, sessionId, sessionZKClient, Optional.empty(), Optional.empty()); sessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public void deactivateAndUpdateCache(Session session) { Session deactivated = session.deactivated(); sessionCache.put(deactivated.getSessionId(), deactivated); } public void deleteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private List<Long> 
getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadSessions() throws NumberFormatException { getRemoteSessionsFromZooKeeper().forEach(this::sessionAdded); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public synchronized void sessionAdded(long sessionId) { log.log(Level.FINE, () -> "Adding session " + sessionId); Session session = createSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createSessionFromDistributedApplicationPackage(sessionId); } void activate(Session session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + session); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); sessionCache.remove(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(Session session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == 
session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepare(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(Session session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } ApplicationSet applicationSet = loadApplication(session); Session activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); sessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? 
extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : sessionCache.values()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: 
sessionsChanged(); break; default: break; } }); } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); try { for (Session candidate : sessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { delete(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { delete(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(Session candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean isActiveSession(Session candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { 
user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private Session createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); Session session = new Session(tenantName, sessionId, sessionZKClient, app); waiter.awaitCompletion(timeoutBudget.timeLeft()); addSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); try { long currentActiveSessionId = 
applicationRepo.requireActiveSessionOf(appId); Session activeSession = getSession(currentActiveSessionId); currentActiveApplicationSet = Optional.ofNullable(ensureApplicationLoaded(activeSession)); } catch (IllegalArgumentException e) { } return currentActiveApplicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ void createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createSessionWithApplicationPackage(sessionId, applicationPackage); } void createSessionWithApplicationPackage(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); Session session = new Session(tenantName, sessionId, sessionZKClient, applicationPackage); addSession(session); } /** * Returns a new session for the given session id if it does not already exist. 
* Will also add the session to the session cache if necessary */ public void createSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Session " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, "File reference for session " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating session for tenant '" + tenantName + "' with id " + sessionId); try { ApplicationPackage applicationPackage = createApplicationPackage(sessionDir, applicationId, sessionId, false); createSessionWithApplicationPackage(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, Session session) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, session, metrics, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(session); } } @Override public String toString() { return getSessions().toString(); } public Clock clock() { return clock; } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when 
closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (Session session : sessionCache.values()) if ( ! sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (sessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. 
* Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
/**
 * Repository of deployment sessions for one tenant. Keeps an in-memory cache of sessions,
 * mirrors session state stored in ZooKeeper (through a Curator directory cache on the tenant's
 * sessions path) and on the local file system, and creates, prepares, activates and deletes
 * sessions.
 *
 * NOTE(review): mutating entry points ({@link #addSession}, {@link #createSession},
 * {@link #sessionAdded}, {@link #sessionsChanged}) are synchronized, but read paths are not —
 * confirm the intended concurrency contract with callers.
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session directories on disk are named by their numeric session id.
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel used as "currently active session id" when the application has no active session.
    private static final long nonExistingActiveSessionId = 0;

    // All sessions known to this repository, keyed by session id.
    private final Map<Long, Session> sessionCache = new ConcurrentHashMap<>();
    // One ZooKeeper state watcher per session; created lazily in updateSessionStateWatcher().
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>();
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final MetricUpdater metrics;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final GlobalComponentRegistry componentRegistry;

    public SessionRepository(TenantName tenantName,
                             GlobalComponentRegistry componentRegistry,
                             TenantApplications applicationRepo,
                             SessionPreparer sessionPreparer) {
        this.tenantName = tenantName;
        this.componentRegistry = componentRegistry;
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = componentRegistry.getClock();
        this.curator = componentRegistry.getCurator();
        this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime());
        this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command);
        this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        // Load existing sessions from disk and ZooKeeper before starting the directory cache,
        // so the childEvent listener only sees changes that happen after this point.
        loadAll();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor());
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    // Loads sessions from the local file system first, then from ZooKeeper.
    private void loadAll() {
        loadSessionsFromFileSystem();
        loadSessions();
    }

    /** Adds the given session to the session cache, keyed by its session id. */
    public synchronized void addSession(Session session) {
        long sessionId = session.getSessionId();
        sessionCache.put(sessionId, session);
    }

    /** Returns all sessions currently in the cache. Note: this is a live view of the cache. */
    public Collection<Session> getSessions() {
        return sessionCache.values();
    }

    // Scans the tenant's session directories on disk (named by numeric id) and creates a
    // session for each; directories that cannot be loaded are logged and skipped.
    private void loadSessionsFromFileSystem() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return;
        for (File session : sessions) {
            try {
                createSessionFromId(Long.parseLong(session.getName()));
            } catch (IllegalArgumentException e) {
                log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it.");
            }
        }
    }

    /**
     * Prepares the given session: creates the application in the application repo, runs the
     * session preparer, marks the session PREPARE, and waits for other config servers to
     * complete preparation within the params' timeout budget.
     *
     * @return the config change actions produced by the preparer
     */
    public ConfigChangeActions prepareSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, now, getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                                                     .getConfigChangeActions();
        // Status is set before awaiting the other servers — presumably so they observe PREPARE;
        // TODO(review): confirm this ordering is intentional.
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public Session createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        Session session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget);
        // Carry over identity and deployment metadata from the session we are based on.
        session.setApplicationId(existingApplicationId);
        session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public Session createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget);
    }

    /**
     * Removes the session from the local cache, closes its state watcher, and deletes its
     * application directory on disk. Does not touch session state in ZooKeeper.
     */
    public void deleteLocalSession(Session session) {
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Deleting session " + sessionId);
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        sessionCache.remove(sessionId);
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    // Deletes every locally cached session. Iterates over a snapshot since
    // deleteLocalSession mutates the cache.
    private void deleteAllSessions() {
        List<Session> sessions = new ArrayList<>(sessionCache.values());
        for (Session session : sessions) {
            deleteLocalSession(session);
        }
    }

    /** Returns the cached session with the given id, or null if not present. */
    public Session getSession(long sessionId) {
        return sessionCache.get(sessionId);
    }

    /** Returns the ids of all sessions currently stored under the sessions path in ZooKeeper. */
    public List<Long> getRemoteSessionsFromZooKeeper() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    /**
     * Creates a session object for an id already present in ZooKeeper (no application package),
     * caches it, loads its application if it is the active one, and attaches a state watcher.
     */
    public synchronized Session createSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        Session session = new Session(tenantName, sessionId, sessionZKClient, Optional.empty(), Optional.empty());
        sessionCache.put(sessionId, session);
        loadSessionIfActive(session);
        updateSessionStateWatcher(sessionId, session);
        return session;
    }

    /** Replaces the cached session with its deactivated counterpart. */
    public void deactivateAndUpdateCache(Session session) {
        Session deactivated = session.deactivated();
        sessionCache.put(deactivated.getSessionId(), deactivated);
    }

    /** Deletes the session's data in ZooKeeper via a committed transaction. */
    public void deleteSessionFromZooKeeper(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    // Maps directory-cache child entries (full ZK paths) to session ids.
    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    // Registers every session found in ZooKeeper at startup.
    private void loadSessions() throws NumberFormatException {
        getRemoteSessionsFromZooKeeper().forEach(this::sessionAdded);
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public synchronized void sessionAdded(long sessionId) {
        log.log(Level.FINE, () -> "Adding session " + sessionId);
        Session session = createSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        createSessionFromDistributedApplicationPackage(sessionId);
    }

    /**
     * Activates the session: loads its application, reloads config in the application repo,
     * and notifies the activation completion waiter so other servers can proceed.
     */
    void activate(Session session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + session);
        ApplicationSet app = ensureApplicationLoaded(session);
        log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId);
        applicationRepo.reloadConfig(app);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    // Drops a session that disappeared from ZooKeeper: closes its watcher, evicts it from
    // the cache, and counts the removal in metrics. Does not delete local files.
    private void sessionRemoved(long sessionId) {
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        sessionCache.remove(sessionId);
        metrics.incRemovedSessions();
    }

    // If this session is the active session of some application, load and reload its config.
    private void loadSessionIfActive(Session session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.reloadConfig(ensureApplicationLoaded(session));
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId +
                                    " (generation " + session.getSessionId() + ")");
                return;
            }
        }
    }

    /** Loads the session's application and notifies the prepare completion waiter. */
    void prepare(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter, session);
    }

    /**
     * Returns the session's application set, building it (and replacing the cached session
     * with an activated one carrying the set) if it is not already loaded.
     */
    public ApplicationSet ensureApplicationLoaded(Session session) {
        if (session.applicationSet().isPresent()) {
            return session.applicationSet().get();
        }
        ApplicationSet applicationSet = loadApplication(session);
        Session activated = session.activated(applicationSet);
        long sessionId = activated.getSessionId();
        sessionCache.put(sessionId, activated);
        updateSessionStateWatcher(sessionId, activated);
        return applicationSet;
    }

    /** Notifies the upload completion waiter for the session. */
    void confirmUpload(Session session) {
        Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter, session);
        log.log(Level.FINE, "Done notifying upload for session " + sessionId);
    }

    /**
     * Notifies the given completion waiter. Failures caused by the waiter node having been
     * deleted or already existing are logged and ignored; anything else is rethrown.
     * NOTE(review): assumes e.getCause() is non-null for the RuntimeExceptions seen here —
     * a cause-less RuntimeException would NPE. Confirm against the waiter implementation.
     */
    void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                              KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() +
                                    " (" + completionWaiter + ")," + " node " +
                                    (exceptionClass.equals(KeeperException.NoNodeException.class)
                                     ? "has been deleted" : "already exists"));
            else
                throw e;
        }
    }

    // Builds the application set for a session from the application package and allocation
    // data stored in ZooKeeper.
    private ApplicationSet loadApplication(Session session) {
        log.log(Level.FINE, () -> "Loading application for " + session);
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(),
                                                                    sessionZooKeeperClient, componentRegistry);
        SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts());
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage, allocatedHosts, clock.instant()));
    }

    // Recomputes per-status session counts and publishes them as metrics; runs on the
    // ZK watcher executor since it is triggered from watcher callbacks.
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            for (Session session : sessionCache.values()) {
                sessionMetrics.add(session.getStatus());
            }
            metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    // Directory-cache listener: re-syncs the session set when children are added/removed
    // or the ZooKeeper connection is re-established.
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    /**
     * Deletes sessions that have expired (and are not active), and sessions older than a day
     * that are not the active session of their application.
     * NOTE(review): calls a delete(Session) method that is not visible in this part of the
     * file — verify it removes both ZooKeeper and local state.
     *
     * @param activeSessions map from application id to its currently active session id
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        try {
            for (Session candidate : sessionCache.values()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);
                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    delete(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        delete(candidate);
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }
        } catch (Throwable e) {
            // Best effort: purging must never take down the caller.
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    // True if the session is older than the configured session lifetime.
    private boolean hasExpired(Session candidate) {
        return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant());
    }

    private boolean isActiveSession(Session candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    // Guards against reusing a session id that already has state in ZooKeeper.
    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    // Wraps the application directory in a FilesApplicationPackage carrying deploy metadata
    // (user, timestamp, session ids). Falls back to "unknown" when $USER is unset.
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    // Allocates a fresh session id, copies the application package into the session dir,
    // creates the session in ZooKeeper, and waits for the upload to be acknowledged.
    private Session createSessionFromApplication(File applicationFile,
                                                 ApplicationId applicationId,
                                                 boolean internalRedeploy,
                                                 TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy);
            log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            sessionZKClient.createNewSession(clock.instant());
            Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
            Session session = new Session(tenantName, sessionId, sessionZKClient, app);
            waiter.awaitCompletion(timeoutBudget.timeLeft());
            addSession(session);
            return session;
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    // Copies the application into the session's directory and writes package metadata.
    private ApplicationPackage createApplicationPackage(File applicationFile,
                                                        ApplicationId applicationId,
                                                        long sessionId,
                                                        boolean internalRedeploy) throws IOException {
        Optional<Long> activeSessionId = getActiveSessionId(applicationId);
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationFile, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  sessionId,
                                                                  activeSessionId,
                                                                  internalRedeploy);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }

    /**
     * Returns the application set of the given application's active session, or empty if the
     * application has no active session.
     */
    public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
        Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
        try {
            long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId);
            Session activeSession = getSession(currentActiveSessionId);
            currentActiveApplicationSet = Optional.ofNullable(ensureApplicationLoaded(activeSession));
        } catch (IllegalArgumentException e) {
            // Ignored deliberately — presumably thrown when the application has no active
            // session, in which case we return empty. TODO(review): confirm.
        }
        return currentActiveApplicationSet;
    }

    // Copies sourceDir into destinationDir via a temp dir plus an atomic move, so a
    // partially-copied application directory is never observed. The temp dir is cleaned up
    // in the finally block if the move did not happen.
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists())
            throw new RuntimeException("Destination dir " + destinationDir + " already exists");
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");
        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    void createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        createSessionWithApplicationPackage(sessionId, applicationPackage);
    }

    // Creates and caches a session carrying the given application package.
    void createSessionWithApplicationPackage(long sessionId, ApplicationPackage applicationPackage) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        Session session = new Session(tenantName, sessionId, sessionZKClient, applicationPackage);
        addSession(session);
    }

    /**
     * Returns a new session for the given session id if it does not already exist.
     * Will also add the session to the session cache if necessary
     */
    public void createSessionFromDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
            log.log(Level.FINE, () -> "Session " + sessionId + " already exists");
            createSessionFromId(sessionId);
            return;
        }
        // No local copy: try to materialize the session from the application package file
        // reference distributed via ZooKeeper.
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // File distribution has not delivered the package to this server yet; give up quietly.
                log.log(Level.FINE, "File reference for session " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating session for tenant '" + tenantName + "' with id " + sessionId);
            try {
                ApplicationPackage applicationPackage = createApplicationPackage(sessionDir, applicationId, sessionId, false);
                createSessionWithApplicationPackage(sessionId, applicationPackage);
            } catch (Exception e) {
                throw new RuntimeException("Error creating session " + sessionId, e);
            }
        }
    }

    // Returns the active session id of the application, or empty if it is not active.
    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    // Allocates the next session id from the shared counter in ZooKeeper.
    private long getNextSessionId() {
        return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId();
    }

    /** Returns the ZooKeeper path of the given session. */
    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = componentRegistry.getConfigserverConfig().serverId();
        return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId);
    }

    // Returns the session's application directory, failing if it does not exist.
    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId);
    }

    // Creates a state watcher for the session if none exists yet, otherwise points the
    // existing watcher at the (possibly replaced) session instance.
    private void updateSessionStateWatcher(long sessionId, Session session) {
        SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId);
        if (sessionStateWatcher == null) {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, session, metrics, zkWatcherExecutor, this));
        } else {
            sessionStateWatcher.updateRemoteSession(session);
        }
    }

    @Override
    public String toString() {
        return getSessions().toString();
    }

    public Clock clock() { return clock; }

    /**
     * Shuts down the repository: deletes all local sessions and tenant directories, closes
     * the directory cache, and finally removes all remaining watchers/cache entries (an empty
     * "current" session list makes checkForRemovedSessions drop everything).
     */
    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    // Re-syncs the local session set against the directory cache's current ZooKeeper view.
    private synchronized void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    // Drops cached sessions no longer present in the given list.
    private void checkForRemovedSessions(List<Long> sessions) {
        for (Session session : sessionCache.values())
            if ( ! sessions.contains(session.getSessionId()))
                sessionRemoved(session.getSessionId());
    }

    // Adds sessions present in the given list but not yet cached.
    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (sessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    /**
     * Returns a transaction which sets the session's status to ACTIVATE and records it as
     * the active session of its application.
     */
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    private Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    // A transaction whose operations are file-system actions executed at commit time.
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {
        void commit();
    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
There is more renaming, collapsing, and simplification that can be done; I will follow up on that in a later PR.
/** Deletes the session everywhere: its ZooKeeper state, the cache entry, and its local files. */
public void delete(Session session) {
    long sessionId = session.getSessionId();
    // ZooKeeper state goes first, then the in-memory and on-disk remains.
    deleteSessionFromZooKeeper(session);
    sessionCache.remove(sessionId);
    deleteLocalSession(session);
}
deleteLocalSession(session);
/**
 * Deletes the session both remotely and locally: removes its state from ZooKeeper, evicts it
 * from the session cache, and deletes its local application directory.
 * NOTE(review): deleteLocalSession also removes the cache entry, so the explicit
 * sessionCache.remove here looks redundant — confirm before simplifying.
 */
public void delete(Session session) {
    long sessionId = session.getSessionId();
    deleteSessionFromZooKeeper(session);
    sessionCache.remove(sessionId);
    deleteLocalSession(session);
}
/**
 * Repository of config deployment sessions for a single tenant.
 *
 * <p>Keeps an in-memory cache of sessions, mirrors session state between the local
 * file system and ZooKeeper, and reacts to ZooKeeper changes (sessions added or
 * removed by other config servers) through a Curator directory cache.
 *
 * <p>NOTE(review): thread-safety relies on the ConcurrentHashMap session cache plus
 * {@code synchronized} on session creation/change methods — confirm callers do not
 * need stronger guarantees (e.g. sessionStateWatchers is a plain HashMap).
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session directories on disk are named by their numeric session id.
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel session id used when an application has no currently active session.
    private static final long nonExistingActiveSessionId = 0;

    private final Map<Long, Session> sessionCache = new ConcurrentHashMap<>();
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>();
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final MetricUpdater metrics;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final GlobalComponentRegistry componentRegistry;

    public SessionRepository(TenantName tenantName,
                             GlobalComponentRegistry componentRegistry,
                             TenantApplications applicationRepo,
                             SessionPreparer sessionPreparer) {
        this.tenantName = tenantName;
        this.componentRegistry = componentRegistry;
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = componentRegistry.getClock();
        this.curator = componentRegistry.getCurator();
        this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime());
        this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command);
        this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        // Load existing sessions before starting to listen, so the cache is populated
        // when the first ZooKeeper child events arrive.
        loadAll();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor());
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    /** Loads sessions from the local file system, then from ZooKeeper. */
    private void loadAll() {
        loadSessionsFromFileSystem();
        loadSessions();
    }

    /** Adds (or replaces) a session in the in-memory cache, keyed by session id. */
    public synchronized void addSession(Session session) {
        long sessionId = session.getSessionId();
        sessionCache.put(sessionId, session);
    }

    /** Returns all currently cached sessions. */
    public Collection<Session> getSessions() {
        return sessionCache.values();
    }

    /** Creates sessions for every numeric session directory found on the local file system. */
    private void loadSessionsFromFileSystem() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return; // sessions dir missing or not a directory
        for (File session : sessions) {
            try {
                createSessionFromId(Long.parseLong(session.getName()));
            } catch (IllegalArgumentException e) {
                // A bad session dir should not prevent loading the rest.
                log.log(Level.WARNING, "Could not load session '" +
                                       session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it.");
            }
        }
    }

    /**
     * Prepares a session: creates the application, runs the session preparer, marks the
     * session PREPARE and waits for other config servers to acknowledge.
     *
     * @param session the session to prepare
     * @param logger  deploy logger for user-visible messages
     * @param params  prepare parameters (application id, timeout budget, ...)
     * @param now     the current time, passed to the preparer
     * @return the config change actions resulting from preparation
     */
    public ConfigChangeActions prepareSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, now, getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                                                     .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession  the session to use as base
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget    timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public Session createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        Session session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget);
        // Carry over metadata from the session we are based on.
        session.setApplicationId(existingApplicationId);
        session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId        application id for this new session.
     * @param timeoutBudget        Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public Session createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget);
    }

    /**
     * Deletes all local state for a session: its state watcher, its cache entry and
     * its application directory on disk. ZooKeeper state is not touched.
     */
    public void deleteLocalSession(Session session) {
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Deleting session " + sessionId);
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        sessionCache.remove(sessionId);
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    /** Deletes the local state of every cached session (copy taken to avoid concurrent modification). */
    private void deleteAllSessions() {
        List<Session> sessions = new ArrayList<>(sessionCache.values());
        for (Session session : sessions) {
            deleteLocalSession(session);
        }
    }

    /** Returns the cached session with the given id, or null if not cached. */
    public Session getSession(long sessionId) {
        return sessionCache.get(sessionId);
    }

    /** Returns the session ids present under the sessions path in ZooKeeper. */
    public List<Long> getRemoteSessionsFromZooKeeper() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    /**
     * Creates a session object for an already existing session id, caches it, loads its
     * application if it is the active one, and ensures a state watcher exists for it.
     */
    public synchronized Session createSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        Session session = new Session(tenantName, sessionId, sessionZKClient, Optional.empty(), Optional.empty());
        sessionCache.put(sessionId, session);
        loadSessionIfActive(session);
        updateSessionStateWatcher(sessionId, session);
        return session;
    }

    /** Replaces the cached session with its deactivated variant. */
    public void deactivateAndUpdateCache(Session session) {
        Session deactivated = session.deactivated();
        sessionCache.put(deactivated.getSessionId(), deactivated);
    }

    /** Deletes the session's state in ZooKeeper (transactionally). */
    public void deleteSessionFromZooKeeper(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    /** Extracts session ids from directory cache child data (last path element is the id). */
    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    /** Parses each child name as a session id. Throws NumberFormatException on non-numeric names. */
    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    /** Adds a session for every session id found in ZooKeeper. */
    private void loadSessions() throws NumberFormatException {
        getRemoteSessionsFromZooKeeper().forEach(this::sessionAdded);
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public synchronized void sessionAdded(long sessionId) {
        log.log(Level.FINE, () -> "Adding session " + sessionId);
        Session session = createSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        createSessionFromDistributedApplicationPackage(sessionId);
    }

    /**
     * Activates a session: loads its application, reloads config for it, and notifies
     * the completion waiter so other config servers see the activation.
     */
    void activate(Session session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + session);
        ApplicationSet app = ensureApplicationLoaded(session);
        log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId);
        applicationRepo.reloadConfig(app);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    /** Removes a session's watcher and cache entry, and counts the removal in metrics. */
    private void sessionRemoved(long sessionId) {
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        sessionCache.remove(sessionId);
        metrics.incRemovedSessions();
    }

    /** If this session is the active session of some application, loads and reloads its config. */
    private void loadSessionIfActive(Session session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.reloadConfig(ensureApplicationLoaded(session));
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId +
                                    " (generation " + session.getSessionId() + ")");
                return;
            }
        }
    }

    /** Loads the session's application and notifies the prepare waiter. */
    void prepare(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter, session);
    }

    /**
     * Returns the session's application set, loading it (and caching the resulting
     * activated session) if not already loaded.
     */
    public ApplicationSet ensureApplicationLoaded(Session session) {
        if (session.applicationSet().isPresent()) {
            return session.applicationSet().get();
        }
        ApplicationSet applicationSet = loadApplication(session);
        Session activated = session.activated(applicationSet);
        long sessionId = activated.getSessionId();
        sessionCache.put(sessionId, activated);
        updateSessionStateWatcher(sessionId, activated);
        return applicationSet;
    }

    /** Notifies the upload waiter that this session's application package has been uploaded. */
    void confirmUpload(Session session) {
        Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter, session);
        log.log(Level.FINE, "Done notifying upload for session " + sessionId);
    }

    /**
     * Notifies a completion waiter, tolerating races where the waiter node was already
     * deleted or already exists (both wrapped as RuntimeException by the waiter).
     * Any other failure is rethrown.
     */
    void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            // These two ZooKeeper races are benign; everything else is a real error.
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                              KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() +
                                    " (" + completionWaiter + ")," +
                                    " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                        ? "has been deleted"
                        : "already exists"));
            else
                throw e;
        }
    }

    /** Builds the application (model) set for a session from its stored application package. */
    private ApplicationSet loadApplication(Session session) {
        log.log(Level.FINE, () -> "Loading application for " + session);
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(),
                                                                    sessionZooKeeperClient, componentRegistry);
        SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts());
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage, allocatedHosts, clock.instant()));
    }

    /** Recomputes per-status session counts and publishes them as metrics (async on the watcher executor). */
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            for (Session session : sessionCache.values()) {
                sessionMetrics.add(session.getStatus());
            }
            metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    /** Directory cache callback: re-synchronizes the session set on child add/remove/reconnect. */
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    /**
     * Deletes sessions that have expired (and are not active), and inactive sessions
     * older than one day whose application has a different (or no) active session.
     *
     * @param activeSessions map from application id to its currently active session id
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        try {
            for (Session candidate : sessionCache.values()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);
                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    // NOTE(review): delete(Session) is not visible in this chunk — assumed
                    // to be defined elsewhere in this class.
                    delete(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue; // not even partially prepared; leave to expiry above
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        delete(candidate);
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }
        } catch (Throwable e) {
            // Best effort: purging must never take down the caller.
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    /** Returns whether the session is older than the configured session lifetime. */
    private boolean hasExpired(Session candidate) {
        return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant());
    }

    /** Returns whether the session is in the ACTIVATE state. */
    private boolean isActiveSession(Session candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    /** Fails fast if the session's ZooKeeper path already exists (id collision). */
    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    /**
     * Builds a FilesApplicationPackage with deploy metadata (deployer, timestamp,
     * session ids) attached.
     */
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    /**
     * Allocates a new session id, writes the application package locally and the session
     * to ZooKeeper, waits for the upload to be acknowledged, and caches the session.
     * Any failure is wrapped in a RuntimeException carrying the session id.
     */
    private Session createSessionFromApplication(File applicationFile,
                                                 ApplicationId applicationId,
                                                 boolean internalRedeploy,
                                                 TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy);
            log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            sessionZKClient.createNewSession(clock.instant());
            Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
            Session session = new Session(tenantName, sessionId, sessionZKClient, app);
            waiter.awaitCompletion(timeoutBudget.timeLeft());
            addSession(session);
            return session;
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /**
     * Copies the application into the session's local directory and returns it as an
     * application package with deploy metadata written.
     */
    private ApplicationPackage createApplicationPackage(File applicationFile,
                                                        ApplicationId applicationId,
                                                        long sessionId,
                                                        boolean internalRedeploy) throws IOException {
        Optional<Long> activeSessionId = getActiveSessionId(applicationId);
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationFile, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  sessionId,
                                                                  activeSessionId,
                                                                  internalRedeploy);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }

    /**
     * Returns the application set of the application's currently active session, or
     * empty if the application has no active session.
     */
    public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
        Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
        try {
            long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId);
            Session activeSession = getSession(currentActiveSessionId);
            currentActiveApplicationSet = Optional.ofNullable(ensureApplicationLoaded(activeSession));
        } catch (IllegalArgumentException e) {
            // Intentionally ignored: thrown when the application has no active session,
            // in which case we return empty.
        }
        return currentActiveApplicationSet;
    }

    /**
     * Copies the application directory into place via a temp directory and an atomic
     * move, so a partially-copied application is never visible at the destination.
     * The temp directory is always cleaned up.
     */
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists())
            throw new RuntimeException("Destination dir " + destinationDir + " already exists");
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");
        // Copy to a temp dir in the same parent, then move atomically into place.
        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // Deletes the temp dir if the move did not happen; no-op after a successful move.
            if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    void createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        createSessionWithApplicationPackage(sessionId, applicationPackage);
    }

    /** Creates a session from an already-loaded application package and caches it. */
    void createSessionWithApplicationPackage(long sessionId, ApplicationPackage applicationPackage) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        Session session = new Session(tenantName, sessionId, sessionZKClient, applicationPackage);
        addSession(session);
    }

    /**
     * Returns a new session for the given session id if it does not already exist.
     * Will also add the session to the session cache if necessary
     */
    public void createSessionFromDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
            log.log(Level.FINE, () -> "Session " + sessionId + " already exists");
            createSessionFromId(sessionId);
            return;
        }
        // The package is not on disk; fetch it via the distributed file reference, if any.
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // File reference not yet distributed to this server; give up for now.
                log.log(Level.FINE, "File reference for session " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating session for tenant '" + tenantName + "' with id " + sessionId);
            try {
                ApplicationPackage applicationPackage = createApplicationPackage(sessionDir, applicationId, sessionId, false);
                createSessionWithApplicationPackage(sessionId, applicationPackage);
            } catch (Exception e) {
                throw new RuntimeException("Error creating session " + sessionId, e);
            }
        }
    }

    /** Returns the application's active session id, or empty if the application is not active. */
    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    /** Allocates the next session id from the shared counter in ZooKeeper. */
    private long getNextSessionId() {
        return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId();
    }

    /** Returns the ZooKeeper path of the given session. */
    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    /** Returns the ZooKeeper path of the given session's state node. */
    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    /** Creates a ZooKeeper client scoped to this tenant, the given session and this server. */
    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = componentRegistry.getConfigserverConfig().serverId();
        return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId);
    }

    /** Returns the session's application directory, failing if it does not exist. */
    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    /** Returns the session's application directory on the local file system. */
    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId);
    }

    /**
     * Creates a state watcher for the session if none exists, otherwise points the
     * existing watcher at the (possibly new) session object.
     */
    private void updateSessionStateWatcher(long sessionId, Session session) {
        SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId);
        if (sessionStateWatcher == null) {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, session, metrics, zkWatcherExecutor, this));
        } else {
            sessionStateWatcher.updateRemoteSession(session);
        }
    }

    @Override
    public String toString() {
        return getSessions().toString();
    }

    /** Returns the clock used by this repository (injected for testability). */
    public Clock clock() { return clock; }

    /** Deletes all local session state and closes the directory cache. */
    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // An empty session list makes checkForRemovedSessions drop every cached session.
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    /** Reconciles the local session cache with the current session set in ZooKeeper. */
    private synchronized void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    /** Removes every cached session whose id is not in the given (authoritative) list. */
    private void checkForRemovedSessions(List<Long> sessions) {
        for (Session session : sessionCache.values())
            if ( ! sessions.contains(session.getSessionId()))
                sessionRemoved(session.getSessionId());
    }

    /** Adds every session in the given list that is not yet cached. */
    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (sessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    /**
     * Returns a transaction which sets the session status to ACTIVATE and records it as
     * the application's active session, to be committed by the caller.
     */
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    /** Returns a transaction writing the given status for the session. */
    private Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    /** Marks the session as prepared. */
    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    /** A transaction over file operations; all work happens in commit (prepare is a no-op). */
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {

        void commit();

    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
/**
 * Manages the lifecycle of config deployment sessions for a single tenant: creation (from
 * application packages, from existing sessions, or from a distributed file reference),
 * preparation, activation, expiry and deletion. Session state is mirrored between the local
 * file system and ZooKeeper, and an in-memory cache is kept in sync via ZooKeeper watches.
 *
 * NOTE(review): only some entry points are synchronized; the overall concurrency contract
 * with callers is not visible from this class alone — confirm before relying on it.
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session directories on disk are named by their numeric session id
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel session id used when an application has no currently active session
    private static final long nonExistingActiveSessionId = 0;

    // All known sessions, keyed by session id
    private final Map<Long, Session> sessionCache = new ConcurrentHashMap<>();
    // One ZooKeeper state watcher per session id
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>();
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final MetricUpdater metrics;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final GlobalComponentRegistry componentRegistry;

    public SessionRepository(TenantName tenantName,
                             GlobalComponentRegistry componentRegistry,
                             TenantApplications applicationRepo,
                             SessionPreparer sessionPreparer) {
        this.tenantName = tenantName;
        this.componentRegistry = componentRegistry;
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = componentRegistry.getClock();
        this.curator = componentRegistry.getCurator();
        this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime());
        this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command);
        this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        // Load existing sessions before starting the directory watch, so watch events only report changes
        loadAll();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false,
                                                           componentRegistry.getZkCacheExecutor());
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    /** Loads sessions from both the local file system and ZooKeeper into the cache. */
    private void loadAll() {
        loadSessionsFromFileSystem();
        loadSessions();
    }

    public synchronized void addSession(Session session) {
        long sessionId = session.getSessionId();
        sessionCache.put(sessionId, session);
    }

    public Collection<Session> getSessions() {
        return sessionCache.values();
    }

    /** Re-creates sessions from the numerically named per-session directories found on disk. */
    private void loadSessionsFromFileSystem() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return; // sessions path does not exist or is not a directory
        for (File session : sessions) {
            try {
                createSessionFromId(Long.parseLong(session.getName()));
            } catch (IllegalArgumentException e) {
                // A broken session dir should not prevent loading the rest
                log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" +
                        e.getMessage() + ", skipping it.");
            }
        }
    }

    /**
     * Prepares a session: runs the session preparer, marks the session PREPARED, and waits
     * for the other config servers to complete preparation.
     *
     * @return the config change actions resulting from preparing this session
     */
    public ConfigChangeActions prepareSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, now, getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public Session createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        Session session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget);
        // Carry deployment settings over from the session this is based on
        session.setApplicationId(existingApplicationId);
        session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public Session createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget);
    }

    /** Deletes the local (file system) state of a session and removes it from cache and watchers. */
    public void deleteLocalSession(Session session) {
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Deleting session " + sessionId);
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        sessionCache.remove(sessionId);
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    private void deleteAllSessions() {
        // Copy the values first, since deleteLocalSession mutates sessionCache
        List<Session> sessions = new ArrayList<>(sessionCache.values());
        for (Session session : sessions) {
            deleteLocalSession(session);
        }
    }

    public Session getSession(long sessionId) {
        return sessionCache.get(sessionId);
    }

    public List<Long> getRemoteSessionsFromZooKeeper() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    /** Creates a session object for an id found in ZooKeeper, caches it and sets up its state watcher. */
    public synchronized Session createSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        Session session = new Session(tenantName, sessionId, sessionZKClient, Optional.empty(), Optional.empty());
        sessionCache.put(sessionId, session);
        loadSessionIfActive(session);
        updateSessionStateWatcher(sessionId, session);
        return session;
    }

    public void deactivateAndUpdateCache(Session session) {
        Session deactivated = session.deactivated();
        sessionCache.put(deactivated.getSessionId(), deactivated);
    }

    public void deleteSessionFromZooKeeper(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    /** Extracts session ids from the ZooKeeper directory cache entries (last path component is the id). */
    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    private void loadSessions() throws NumberFormatException {
        getRemoteSessionsFromZooKeeper().forEach(this::sessionAdded);
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public synchronized void sessionAdded(long sessionId) {
        log.log(Level.FINE, () -> "Adding session " + sessionId);
        Session session = createSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        createSessionFromDistributedApplicationPackage(sessionId);
    }

    /** Activates a session: loads its application, reloads config and notifies waiting servers. */
    void activate(Session session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + session);
        ApplicationSet app = ensureApplicationLoaded(session);
        log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId);
        applicationRepo.reloadConfig(app);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    /** Removes a session from cache and watchers, and bumps the removed-sessions metric. */
    private void sessionRemoved(long sessionId) {
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        sessionCache.remove(sessionId);
        metrics.incRemovedSessions();
    }

    /** Reloads config for this session if it is the active session of some active application. */
    private void loadSessionIfActive(Session session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.reloadConfig(ensureApplicationLoaded(session));
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId +
                        " (generation " + session.getSessionId() + ")");
                return;
            }
        }
    }

    void prepare(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter, session);
    }

    /**
     * Returns the session's application set, building it (and caching an activated session)
     * if it is not already present on the session.
     */
    public ApplicationSet ensureApplicationLoaded(Session session) {
        if (session.applicationSet().isPresent()) {
            return session.applicationSet().get();
        }
        ApplicationSet applicationSet = loadApplication(session);
        Session activated = session.activated(applicationSet);
        long sessionId = activated.getSessionId();
        sessionCache.put(sessionId, activated);
        updateSessionStateWatcher(sessionId, activated);
        return applicationSet;
    }

    void confirmUpload(Session session) {
        Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter, session);
        log.log(Level.FINE, "Done notifying upload for session " + sessionId);
    }

    void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            // Benign races: the ZooKeeper node may already have been deleted, or already created
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                             KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() +
                                    " (" + completionWaiter + ")," +
                                    " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                                                ? "has been deleted"
                                                : "already exists"));
            else
                throw e;
        }
    }

    /** Builds the application models for a session from the application package stored in ZooKeeper. */
    private ApplicationSet loadApplication(Session session) {
        log.log(Level.FINE, () -> "Loading application for " + session);
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                    session.getSessionId(),
                                                                    sessionZooKeeperClient,
                                                                    componentRegistry);
        SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts());
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage,
                                                           allocatedHosts,
                                                           clock.instant()));
    }

    /** Recomputes the per-status session count metrics; invoked when a session's state node changes. */
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            for (Session session : sessionCache.values()) {
                sessionMetrics.add(session.getStatus());
            }
            metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    /** Reacts to changes under the sessions path in ZooKeeper by re-syncing the session cache. */
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    /**
     * Deletes expired non-active sessions, and sessions older than one day that are not the
     * active session of their application.
     *
     * @param activeSessions map from application id to its currently active session id
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        try {
            for (Session candidate : sessionCache.values()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);
                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    // delete(Session) is defined elsewhere in this class — not visible in this chunk
                    delete(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    // Older than a day: delete unless it is the application's active session
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        delete(candidate);
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }
        } catch (Throwable e) {
            // Best effort: purging must never take down the caller
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    private boolean hasExpired(Session candidate) {
        return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant());
    }

    private boolean isActiveSession(Session candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    /** Creates an application package with deploy metadata (user, timestamp, session ids) attached. */
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    /** Allocates a new session id, creates the session in ZooKeeper and waits for upload completion. */
    private Session createSessionFromApplication(File applicationFile,
                                                 ApplicationId applicationId,
                                                 boolean internalRedeploy,
                                                 TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy);
            log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            sessionZKClient.createNewSession(clock.instant());
            Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
            Session session = new Session(tenantName, sessionId, sessionZKClient, app);
            waiter.awaitCompletion(timeoutBudget.timeLeft());
            addSession(session);
            return session;
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /** Copies the application into this session's directory and writes its deploy metadata. */
    private ApplicationPackage createApplicationPackage(File applicationFile,
                                                        ApplicationId applicationId,
                                                        long sessionId,
                                                        boolean internalRedeploy) throws IOException {
        Optional<Long> activeSessionId = getActiveSessionId(applicationId);
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationFile, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  sessionId,
                                                                  activeSessionId,
                                                                  internalRedeploy);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }

    /** Returns the currently active application set for the given application, or empty if none is active. */
    public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
        Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
        try {
            long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId);
            Session activeSession = getSession(currentActiveSessionId);
            currentActiveApplicationSet = Optional.ofNullable(ensureApplicationLoaded(activeSession));
        } catch (IllegalArgumentException e) {
            // Thrown by requireActiveSessionOf when no session is active — return empty
        }
        return currentActiveApplicationSet;
    }

    /** Copies via a temp dir and moves atomically, so the destination never exists half-written. */
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists())
            throw new RuntimeException("Destination dir " + destinationDir + " already exists");
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");
        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // Clean up the temp dir if the copy or move failed (no-op after a successful move)
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    void createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        createSessionWithApplicationPackage(sessionId, applicationPackage);
    }

    void createSessionWithApplicationPackage(long sessionId, ApplicationPackage applicationPackage) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        Session session = new Session(tenantName, sessionId, sessionZKClient, applicationPackage);
        addSession(session);
    }

    /**
     * Returns a new session for the given session id if it does not already exist.
     * Will also add the session to the session cache if necessary
     */
    public void createSessionFromDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
            log.log(Level.FINE, () -> "Session " + sessionId + " already exists");
            createSessionFromId(sessionId);
            return;
        }
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // File reference not present locally (yet?) — skip creating the session for now
                log.log(Level.FINE, "File reference for session " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating session for tenant '" + tenantName + "' with id " + sessionId);
            try {
                ApplicationPackage applicationPackage = createApplicationPackage(sessionDir, applicationId, sessionId, false);
                createSessionWithApplicationPackage(sessionId, applicationPackage);
            } catch (Exception e) {
                throw new RuntimeException("Error creating session " + sessionId, e);
            }
        }
    }

    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() {
        return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId();
    }

    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = componentRegistry.getConfigserverConfig().serverId();
        return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId);
    }

    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId);
    }

    /** Creates a watcher for the session's state node, or points an existing one at the new session object. */
    private void updateSessionStateWatcher(long sessionId, Session session) {
        SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId);
        if (sessionStateWatcher == null) {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, session, metrics, zkWatcherExecutor, this));
        } else {
            sessionStateWatcher.updateRemoteSession(session);
        }
    }

    @Override
    public String toString() {
        return getSessions().toString();
    }

    public Clock clock() { return clock; }

    /** Deletes all local sessions and tenant files, then stops watching ZooKeeper. */
    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // Passing an empty list removes every remaining session
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    /** Re-syncs the session cache against the current set of session nodes in ZooKeeper. */
    private synchronized void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    private void checkForRemovedSessions(List<Long> sessions) {
        for (Session session : sessionCache.values())
            if ( ! sessions.contains(session.getSessionId()))
                sessionRemoved(session.getSessionId());
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (sessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    /** Creates a transaction that sets the session ACTIVE and records it as the application's active session. */
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    private Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    /** A transaction whose operations act on the file system rather than ZooKeeper. */
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {

        void commit();

    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
This is an improvement. I'm still a bit sceptical of the method API, though: usually, some condition has led the caller to want to mark this node dirty, and I would guess that the whole condition needs to be re-evaluated while the unallocated lock is held — more than just the presence of the node may have changed between testing the condition and reaching this point where the lock is held.
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; }
Optional<Node> currentNode = db.readNode(node.hostname());
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; }
/**
 * The hub of the node repository: a Curator/ZooKeeper-backed store of all nodes in a
 * zone, together with the operations which move nodes through their lifecycle states
 * and the locks guarding those changes.
 */
class NodeRepository extends AbstractComponent {

    private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

    private final CuratorDatabaseClient db;
    private final Clock clock;
    private final Zone zone;
    private final NodeFlavors flavors;
    private final HostResourcesCalculator resourcesCalculator;
    private final NameResolver nameResolver;
    private final OsVersions osVersions;
    private final InfrastructureVersions infrastructureVersions;
    private final FirmwareChecks firmwareChecks;
    private final DockerImages dockerImages;
    private final JobControl jobControl;
    private final Applications applications;
    private final boolean canProvisionHosts;
    private final int spareCount;

    /**
     * Creates a node repository from a zookeeper provider.
     * This will use the system time to make time-sensitive decisions.
     */
    @Inject
    public NodeRepository(NodeRepositoryConfig config,
                          NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Zone zone,
                          FlagSource flagSource) {
        this(flavors,
             provisionServiceProvider.getHostResourcesCalculator(),
             curator,
             Clock.systemUTC(),
             zone,
             new DnsNameResolver(),
             DockerImage.fromString(config.dockerImage()),
             flagSource,
             config.useCuratorClientCache(),
             provisionServiceProvider.getHostProvisioner().isPresent(),
             // One spare in production zones without dynamic host provisioning, none otherwise
             zone.environment().isProduction() && provisionServiceProvider.getHostProvisioner().isEmpty() ? 1 : 0,
             config.nodeCacheSize());
    }

    /**
     * Creates a node repository from a zookeeper provider and a clock instance
     * which will be used for time-sensitive decisions.
     */
    public NodeRepository(NodeFlavors flavors,
                          HostResourcesCalculator resourcesCalculator,
                          Curator curator,
                          Clock clock,
                          Zone zone,
                          NameResolver nameResolver,
                          DockerImage dockerImage,
                          FlagSource flagSource,
                          boolean useCuratorClientCache,
                          boolean canProvisionHosts,
                          int spareCount,
                          long nodeCacheSize) {
        this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
        this.zone = zone;
        this.clock = clock;
        this.flavors = flavors;
        this.resourcesCalculator = resourcesCalculator;
        this.nameResolver = nameResolver;
        this.osVersions = new OsVersions(this);
        this.infrastructureVersions = new InfrastructureVersions(db);
        this.firmwareChecks = new FirmwareChecks(db, clock);
        this.dockerImages = new DockerImages(db, dockerImage);
        this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
        this.applications = new Applications(db);
        this.canProvisionHosts = canProvisionHosts;
        this.spareCount = spareCount;
        rewriteNodes(); // migrate stored nodes to the current serialization format on startup
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format. */
    private void rewriteNodes() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (State state : State.values()) {
            List<Node> nodes = db.readNodes(state);
            // Writing the nodes back unchanged re-serializes them in the current format
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /** Returns the curator database client used by this */
    public CuratorDatabaseClient database() { return db; }

    /** Returns the Docker image to use for given node */
    public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node.type()); }

    /** @return The name resolver used to resolve hostname and ip addresses */
    public NameResolver nameResolver() { return nameResolver; }

    /** Returns the OS versions to use for nodes in this */
    public OsVersions osVersions() { return osVersions; }

    /** Returns the infrastructure versions to use for nodes in this */
    public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

    /** Returns the status of firmware checks for hosts managed by this. */
    public FirmwareChecks firmwareChecks() { return firmwareChecks; }

    /** Returns the docker images to use for nodes in this. */
    public DockerImages dockerImages() { return dockerImages; }

    /** Returns the status of maintenance jobs managed by this. */
    public JobControl jobControl() { return jobControl; }

    /** Returns this node repo's view of the applications deployed to it */
    public Applications applications() { return applications; }

    /** Returns the flavors known to this */
    public NodeFlavors flavors() { return flavors; }

    /** Returns the host-resources calculator used by this */
    public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

    /** The number of nodes we should ensure has free capacity for node failures whenever possible */
    public int spareCount() { return spareCount; }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> getNode(String hostname, State ... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns all nodes in any of the given states.
     *
     * @param inState the states to return nodes from. If no states are given, all nodes are returned
     */
    public List<Node> getNodes(State ... inState) {
        return new ArrayList<>(db.readNodes(inState));
    }

    /**
     * Finds and returns the nodes of the given type in any of the given states.
     *
     * @param type the node type to return
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     */
    public List<Node> getNodes(NodeType type, State ... inState) {
        return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
    }

    /** Returns a filterable list of nodes in this repository in any of the given states */
    public NodeList list(State ... inState) {
        return NodeList.copyOf(getNodes(inState));
    }

    /** Returns a filterable list of all nodes of an application */
    public NodeList list(ApplicationId application) {
        return NodeList.copyOf(getNodes(application));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(getNodes(), lock);
    }

    /** Returns a filterable list of all load balancers in this repository */
    public LoadBalancerList loadBalancers() {
        return loadBalancers((ignored) -> true);
    }

    /** Returns a filterable list of load balancers belonging to given application */
    public LoadBalancerList loadBalancers(ApplicationId application) {
        return loadBalancers((id) -> id.application().equals(application));
    }

    /** Returns the load balancers whose id matches the given predicate */
    private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
        return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
    }

    /** Returns the nodes of the given application, in any of the given states */
    public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }

    /** Returns all nodes in the inactive state */
    public List<Node> getInactive() { return db.readNodes(State.inactive); }

    /** Returns all nodes in the failed state */
    public List<Node> getFailed() { return db.readNodes(State.failed); }

    /**
     * Returns the ACL for the node (trusted nodes, networks and ports)
     */
    private NodeAcl getNodeAcl(Node node, NodeList candidates) {
        // Sorted by hostname to give deterministic ACL output
        Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
        Set<Integer> trustedPorts = new LinkedHashSet<>();
        Set<String> trustedNetworks = new LinkedHashSet<>();

        // All nodes trust ssh
        trustedPorts.add(22);

        // A node always trusts its parent host, if any
        candidates.parentOf(node).ifPresent(trustedNodes::add);

        // An allocated node trusts its co-allocated nodes and its application's load-balancer networks
        node.allocation().ifPresent(allocation -> {
            trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
            loadBalancers(allocation.owner()).asList().stream()
                                             .map(LoadBalancer::instance)
                                             .map(LoadBalancerInstance::networks)
                                             .forEach(trustedNetworks::addAll);
        });

        switch (node.type()) {
            case tenant:
                // Tenant nodes trust the config and proxy infrastructure, and the hosts of their co-allocated nodes
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
                node.allocation().ifPresent(allocation ->
                        trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
                if (node.state() == State.ready) {
                    // Ready tenant nodes additionally trust all other tenant nodes
                    trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
                }
                break;
            case config:
                // Config servers trust all nodes, and expose their config port
                trustedNodes.addAll(candidates.asList());
                trustedPorts.add(4443);
                break;
            case proxy:
                // Proxies trust config servers and expose their routing ports
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedPorts.add(443);
                trustedPorts.add(4080);
                trustedPorts.add(4443);
                break;
            case controller:
                trustedPorts.add(4443);
                trustedPorts.add(443);
                trustedPorts.add(80);
                break;
            default:
                illegal("Don't know how to create ACL for " + node + " of type " + node.type());
        }
        return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
    }

    /**
     * Creates a list of node ACLs which identify which nodes the given node should trust
     *
     * @param node Node for which to generate ACLs
     * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
     * @return List of node ACLs
     */
    public List<NodeAcl> getNodeAcls(Node node, boolean children) {
        NodeList candidates = list();
        if (children) {
            return candidates.childrenOf(node).asList().stream()
                             .map(childNode -> getNodeAcl(childNode, candidates))
                             .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
        }
        return Collections.singletonList(getNodeAcl(node, candidates));
    }

    /** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
    public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
                           Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
        if (ipConfig.primary().isEmpty()) // create IP config if not given
            ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
        return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type,
                           Optional.empty());
    }

    /** Creates a new node object with empty IP config, without adding it to the node repo */
    public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor,
                           NodeType type) {
        return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
    }

    /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
    public List<Node> addDockerNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
                illegal("Cannot add " + node + ": This is not a docker node");
            if ( ! node.allocation().isPresent())
                illegal("Cannot add " + node + ": Docker containers needs to be allocated");
            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd =  new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates passed in the argument list
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = getNode(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Carry history, reports, fail count and firmware check over from the deprovisioned node
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    nodesToRemove.add(existing.get());
                }

                nodesToAdd.add(node);
            }
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
            db.removeNodes(nodesToRemove); // remove the deprovisioned originals that were merged in above
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != State.provisioned && node.state() != State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        // Clear any retirement flags before making the node ready
                        return node.withWantToRetire(false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    /** Sets a single node ready by hostname; a no-op if it is already ready */
    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));

        if (nodeToReady.state() == State.ready) return nodeToReady;
        return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
                                             .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /** Deactivate nodes owned by application guarded by given lock */
    public void deactivate(NestedTransaction transaction, ProvisionLock lock) {
        deactivate(db.readNodes(lock.application(), State.reserved, State.active), transaction, lock);
        applications.remove(lock.application(), transaction, lock);
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction, @SuppressWarnings("unused") ProvisionLock lock) {
        return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
    }

    /**
     * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     *
     * @throws IllegalArgumentException if the node has hardware failure
     */
    public Node setDirty(Node node, Agent agent, String reason) {
        return db.writeTo(State.dirty, node, agent, Optional.of(reason));
    }

    /** Moves a node and (for hosts) all its children to dirty, throwing if any of them is in a disallowed state */
    public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = getNode(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // For hosts, include all children not already dirty
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                        Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                        Stream.of(nodeToDirty))
                .filter(node -> node.state() != State.dirty)
                .collect(Collectors.toList());

        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != State.provisioned)
                .filter(node -> node.state() != State.failed)
                .filter(node -> node.state() != State.parked)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");

        return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.failed, agent, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     *
     * @return List of all the failed nodes in their new state
     */
    public List<Node> failRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.active, agent, Optional.of(reason));
    }

    /** Moves all children of hostname to toState, then the node itself; returns all moved nodes */
    private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child, toState, agent, reason))
                                 .collect(Collectors.toList());

        moved.add(move(hostname, true, toState, agent, reason));
        return moved;
    }

    /** Moves the node with the given hostname to toState, optionally dropping its allocation first */
    private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));

        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }

        return move(node, toState, agent, reason);
    }

    /** Moves a node to toState under its lock, guarding against duplicate active cluster/index combinations */
    private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
        if (toState == Node.State.active && node.allocation().isEmpty())
            illegal("Could not set " + node + " active. It has no allocation.");

        try (Mutex lock = lock(node)) {
            if (toState == State.active) {
                // No two active nodes of the same application may share cluster and index
                for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            return db.writeTo(toState, node, agent, reason);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            // Tenant docker containers in dirty are removed rather than readied
            if (node.state() != State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == State.ready) return node;

        // Refuse to ready a node whose parent host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(Collections.singletonList(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        return removeRecursively(node, false);
    }

    /** Removes a node and (for hosts) its children; hosts in statically-provisioned zones are deprovisioned instead */
    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);

            if (node.type().isHost()) {
                List<Node> children = list().childrenOf(node).asList();
                children.forEach(child -> requireRemovable(child, true, force));
                db.removeNodes(children);
                List<Node> removed = new ArrayList<>(children);
                if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                    db.removeNodes(List.of(node));
                else {
                    // Statically-provisioned hosts are kept as deprovisioned, with IP config cleared
                    node = node.with(IP.Config.EMPTY);
                    move(node, State.deprovisioned, Agent.system, Optional.empty());
                }
                removed.add(node);
                return removed;
            }
            else {
                List<Node> removed = List.of(node);
                db.removeNodes(removed);
                return removed;
            }
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        db.removeNodes(List.of(node));
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node: node is unallocated
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *      If only removing the container node: node in state ready
     *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent())
            illegal(node + " is currently allocated and cannot be removed");

        if (!node.type().isHost() && !removingAsChild) {
            if (node.state() != State.ready)
                illegal(node + " can not be removed as it is not in the state " + State.ready);
        }
        else if (!node.type().isHost()) { // removing a child node, along with its parent
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
            if ( ! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
        else { // a host
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
            if (! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching the filter.
     *
     * @return the nodes in their new state.
     */
    public List<Node> restart(NodeFilter filter) {
        return performOn(StateFilter.from(State.active, filter),
                         (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                               lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state.
     */
    public List<Node> reboot(NodeFilter filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state.
     */
    public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock Already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    // NOTE(review): the javadoc originally preceding this method described performOn
    // (filter/action/return of the performed nodes) and appears misplaced; it belongs
    // with the performOn method — confirm against version history.
    /** Returns whether the given host is in a state where tenant nodes can be allocated to it */
    public boolean canAllocateTenantNodeTo(Node host) {
        if ( ! host.type().canRun(NodeType.tenant)) return false;
        if (host.status().wantToRetire()) return false;
        if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

        if ( canProvisionHosts())
            return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
        else
            return host.state() == State.active;
    }

    /** Returns whether this repository can provision hosts on demand */
    public boolean canProvisionHosts() {
        return canProvisionHosts;
    }

    /** Returns the time keeper of this system */
    public Clock clock() { return clock; }

    /** Returns the zone of this system */
    public Zone zone() { return zone; }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Acquires the appropriate lock for this node */
    public Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    /** Throws IllegalArgumentException with the given message */
    private void illegal(String message) {
        throw new IllegalArgumentException(message);
    }

}
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final DockerImages dockerImages; private final JobControl jobControl; private final Applications applications; private final boolean canProvisionHosts; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider.getHostResourcesCalculator(), curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), flagSource, config.useCuratorClientCache(), provisionServiceProvider.getHostProvisioner().isPresent(), zone.environment().isProduction() && provisionServiceProvider.getHostProvisioner().isEmpty() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, HostResourcesCalculator resourcesCalculator, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage dockerImage, FlagSource flagSource, boolean useCuratorClientCache, boolean canProvisionHosts, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = resourcesCalculator; this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.dockerImages = new DockerImages(db, dockerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.canProvisionHosts = canProvisionHosts; this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** Returns the Docker image to use for given node */ public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node.type()); } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the 
infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public DockerImages dockerImages() { return dockerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. 
If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); } public List<Node> getNodes(ApplicationId id, State ... 
inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g. 
containers on a Docker host) * @return List of node ACLs */ public List<NodeAcl> getNodeAcls(Node node, boolean children) { NodeList candidates = list(); if (children) { return candidates.childrenOf(node).asList().stream() .map(childNode -> getNodeAcl(childNode, candidates)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } return Collections.singletonList(getNodeAcl(node, candidates)); } /** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */ public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname, Flavor flavor, Optional<TenantName> reservedTo, NodeType type) { if (ipConfig.primary().isEmpty()) ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname)); return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type, Optional.empty()); } public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) { return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if ( ! node.allocation().isPresent()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). 
Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. */ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. 
It is not provisioned or dirty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** Deactivate nodes owned by application guarded by given lock */ public void deactivate(NestedTransaction transaction, ProvisionLock lock) { deactivate(db.readNodes(lock.application(), State.reserved, State.active), transaction, lock); applications.remove(lock.application(), transaction, lock); } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. 
*/ public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction, @SuppressWarnings("unused") ProvisionLock lock) { return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction); } /** Move nodes to the dirty state */ public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason)); } /** * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. * * @throws IllegalArgumentException if the node has hardware failure */ public Node setDirty(Node node, Agent agent, String reason) { return db.writeTo(State.dirty, node, agent, Optional.of(reason)); } public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]"); return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList()); } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason)); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, node, agent, reason); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. 
*/ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(Collections.singletonList(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. * * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); List<Node> removed = new ArrayList<>(children); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** 
Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } /** * Throws if the given node cannot be removed. Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state. */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * @return the nodes in their new state. 
*/ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state. */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock Already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ public boolean canAllocateTenantNodeTo(Node host) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if ( canProvisionHosts()) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns whether this repository can provision hosts on demand */ public boolean canProvisionHosts() { return canProvisionHosts; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Acquires the appropriate lock for this node */ public Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
// NOTE(review): stray conversational text ("Ah, yes you're right.") was embedded here as a bare
// line, which is not valid Java — preserved as a comment; it appears to be a pasted review remark
// and can likely be deleted.
/** Requests a restart of the services on every host of the given application selected by the filter. */
public void restart(ApplicationId application, HostFilter filter) {
    var nodeFilter = ApplicationFilter.from(application, NodeHostFilter.from(filter));
    nodeRepository.restart(nodeFilter);
}
}
/** Restarts services on the given application's hosts that match the supplied host filter. */
public void restart(ApplicationId application, HostFilter filter) {
    var hostSelection = NodeHostFilter.from(filter);
    nodeRepository.restart(ApplicationFilter.from(application, hostSelection));
}
class NodeRepositoryProvisioner implements Provisioner { private static final Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName()); private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; private final CapacityPolicies capacityPolicies; private final Zone zone; private final Preparer preparer; private final Activator activator; private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; private final NodeResourceLimits nodeResourceLimits; private final IntFlag tenantNodeQuota; @Inject public NodeRepositoryProvisioner(NodeRepository nodeRepository, Zone zone, ProvisionServiceProvider provisionServiceProvider, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); this.capacityPolicies = new CapacityPolicies(nodeRepository); this.zone = zone; this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService(nodeRepository) .map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService, flagSource)); this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.preparer = new Preparer(nodeRepository, flagSource, provisionServiceProvider.getHostProvisioner(), loadBalancerProvisioner); this.activator = new Activator(nodeRepository, loadBalancerProvisioner); this.tenantNodeQuota = Flags.TENANT_NODE_QUOTA.bindTo(flagSource); } /** * Returns a list of nodes in the prepared or active state, matching the given constraints. * The nodes are ordered by increasing index number. */ @Override public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) { log.log(Level.FINE, () -> "Received deploy prepare request for " + requested + " for application " + application + ", cluster " + cluster); if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group"); if ( ! 
hasQuota(application, requested.maxResources().nodes())) throw new IllegalArgumentException(requested + " requested for " + cluster + ". Max value exceeds your quota. Resolve this at https: nodeResourceLimits.ensureWithinAdvertisedLimits("Min", requested.minResources().nodeResources(), cluster); nodeResourceLimits.ensureWithinAdvertisedLimits("Max", requested.maxResources().nodeResources(), cluster); int groups; NodeResources resources; NodeSpec nodeSpec; if ( requested.type() == NodeType.tenant) { ClusterResources target = decideTargetResources(application, cluster, requested); int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application); groups = Math.min(target.groups(), nodeCount); resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster); boolean exclusive = capacityPolicies.decideExclusivity(cluster.isExclusive()); nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail()); logIfDownscaled(target.nodes(), nodeCount, cluster, logger); } else { groups = 1; resources = requested.minResources().nodeResources(); nodeSpec = NodeSpec.from(requested.type()); } return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources); } @Override public void activate(Collection<HostSpec> hosts, ActivationContext context, ApplicationTransaction transaction) { validate(hosts); activator.activate(hosts, context.generation(), transaction); } @Override @Override public void remove(ApplicationTransaction transaction) { nodeRepository.deactivate(transaction); loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.deactivate(transaction)); } @Override public ProvisionLock lock(ApplicationId application) { return new ProvisionLock(application, nodeRepository.lock(application)); } /** * Returns the target cluster resources, a value between the min and max in the requested capacity, * and updates the application store with the received min and max. 
*/ private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { try (Mutex lock = nodeRepository.lock(applicationId)) { Application application = nodeRepository.applications().get(applicationId).orElse(new Application(applicationId)); application = application.withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested.minResources(), requested.maxResources()); nodeRepository.applications().put(application, lock); return application.clusters().get(clusterSpec.id()).targetResources() .orElseGet(() -> currentResources(applicationId, clusterSpec, requested)); } } /** Returns the current resources of this cluster, or the closes */ private ClusterResources currentResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { List<Node> nodes = NodeList.copyOf(nodeRepository.getNodes(applicationId, Node.State.active)) .cluster(clusterSpec.id()) .not().retired() .not().removable() .asList(); boolean firstDeployment = nodes.isEmpty(); AllocatableClusterResources currentResources = firstDeployment ? new AllocatableClusterResources(requested.minResources(), clusterSpec.type(), clusterSpec.isExclusive(), nodeRepository) : new AllocatableClusterResources(nodes, nodeRepository); return within(Limits.of(requested), clusterSpec.isExclusive(), currentResources, firstDeployment); } /** Make the minimal adjustments needed to the current resources to stay within the limits */ private ClusterResources within(Limits limits, boolean exclusive, AllocatableClusterResources current, boolean firstDeployment) { if (limits.min().equals(limits.max())) return limits.min(); var currentAsAdvertised = current.toAdvertisedClusterResources(); if (! 
firstDeployment && currentAsAdvertised.isWithin(limits.min(), limits.max())) return currentAsAdvertised; return allocationOptimizer.findBestAllocation(ResourceTarget.preserve(current), current, limits, exclusive) .orElseThrow(() -> new IllegalArgumentException("No allocation possible within " + limits)) .toAdvertisedClusterResources(); } private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) { if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes) logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster + ", downscaling to " + actualNodes + " nodes in " + zone.environment()); } private boolean hasQuota(ApplicationId application, int requestedNodes) { if ( ! this.zone.system().isPublic()) return true; if (application.tenant().value().hashCode() == 3857) return requestedNodes <= 60; if (application.tenant().value().hashCode() == -1271827001) return requestedNodes <= 75; return requestedNodes <= tenantNodeQuota.with(FetchVector.Dimension.APPLICATION_ID, application.tenant().value()).value(); } private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) { nodes.sort(Comparator.comparingInt(node -> node.allocation().get().membership().index())); List<HostSpec> hosts = new ArrayList<>(nodes.size()); for (Node node : nodes) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " - " + node.flavor()); Allocation nodeAllocation = node.allocation().orElseThrow(IllegalStateException::new); hosts.add(new HostSpec(node.hostname(), nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository), node.flavor().resources(), requestedResources, nodeAllocation.membership(), node.status().vespaVersion(), nodeAllocation.networkPorts(), node.status().dockerImage())); if (nodeAllocation.networkPorts().isPresent()) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " has port allocations"); } } return hosts; } private void 
validate(Collection<HostSpec> hosts) { for (HostSpec host : hosts) { if (host.membership().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host); if (host.membership().get().cluster().group().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host); } } }
class NodeRepositoryProvisioner implements Provisioner { private static final Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName()); private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; private final CapacityPolicies capacityPolicies; private final Zone zone; private final Preparer preparer; private final Activator activator; private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; private final NodeResourceLimits nodeResourceLimits; private final IntFlag tenantNodeQuota; @Inject public NodeRepositoryProvisioner(NodeRepository nodeRepository, Zone zone, ProvisionServiceProvider provisionServiceProvider, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); this.capacityPolicies = new CapacityPolicies(nodeRepository); this.zone = zone; this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService(nodeRepository) .map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService, flagSource)); this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.preparer = new Preparer(nodeRepository, flagSource, provisionServiceProvider.getHostProvisioner(), loadBalancerProvisioner); this.activator = new Activator(nodeRepository, loadBalancerProvisioner); this.tenantNodeQuota = Flags.TENANT_NODE_QUOTA.bindTo(flagSource); } /** * Returns a list of nodes in the prepared or active state, matching the given constraints. * The nodes are ordered by increasing index number. */ @Override public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) { log.log(Level.FINE, () -> "Received deploy prepare request for " + requested + " for application " + application + ", cluster " + cluster); if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group"); if ( ! 
hasQuota(application, requested.maxResources().nodes())) throw new IllegalArgumentException(requested + " requested for " + cluster + ". Max value exceeds your quota. Resolve this at https: nodeResourceLimits.ensureWithinAdvertisedLimits("Min", requested.minResources().nodeResources(), cluster); nodeResourceLimits.ensureWithinAdvertisedLimits("Max", requested.maxResources().nodeResources(), cluster); int groups; NodeResources resources; NodeSpec nodeSpec; if ( requested.type() == NodeType.tenant) { ClusterResources target = decideTargetResources(application, cluster, requested); int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application); groups = Math.min(target.groups(), nodeCount); resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster); boolean exclusive = capacityPolicies.decideExclusivity(cluster.isExclusive()); nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail()); logIfDownscaled(target.nodes(), nodeCount, cluster, logger); } else { groups = 1; resources = requested.minResources().nodeResources(); nodeSpec = NodeSpec.from(requested.type()); } return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources); } @Override public void activate(Collection<HostSpec> hosts, ActivationContext context, ApplicationTransaction transaction) { validate(hosts); activator.activate(hosts, context.generation(), transaction); } @Override @Override public void remove(ApplicationTransaction transaction) { nodeRepository.deactivate(transaction); loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.deactivate(transaction)); } @Override public ProvisionLock lock(ApplicationId application) { return new ProvisionLock(application, nodeRepository.lock(application)); } /** * Returns the target cluster resources, a value between the min and max in the requested capacity, * and updates the application store with the received min and max. 
*/ private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { try (Mutex lock = nodeRepository.lock(applicationId)) { Application application = nodeRepository.applications().get(applicationId).orElse(new Application(applicationId)); application = application.withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested.minResources(), requested.maxResources()); nodeRepository.applications().put(application, lock); return application.clusters().get(clusterSpec.id()).targetResources() .orElseGet(() -> currentResources(applicationId, clusterSpec, requested)); } } /** Returns the current resources of this cluster, or the closes */ private ClusterResources currentResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { List<Node> nodes = NodeList.copyOf(nodeRepository.getNodes(applicationId, Node.State.active)) .cluster(clusterSpec.id()) .not().retired() .not().removable() .asList(); boolean firstDeployment = nodes.isEmpty(); AllocatableClusterResources currentResources = firstDeployment ? new AllocatableClusterResources(requested.minResources(), clusterSpec.type(), clusterSpec.isExclusive(), nodeRepository) : new AllocatableClusterResources(nodes, nodeRepository); return within(Limits.of(requested), clusterSpec.isExclusive(), currentResources, firstDeployment); } /** Make the minimal adjustments needed to the current resources to stay within the limits */ private ClusterResources within(Limits limits, boolean exclusive, AllocatableClusterResources current, boolean firstDeployment) { if (limits.min().equals(limits.max())) return limits.min(); var currentAsAdvertised = current.toAdvertisedClusterResources(); if (! 
firstDeployment && currentAsAdvertised.isWithin(limits.min(), limits.max())) return currentAsAdvertised; return allocationOptimizer.findBestAllocation(ResourceTarget.preserve(current), current, limits, exclusive) .orElseThrow(() -> new IllegalArgumentException("No allocation possible within " + limits)) .toAdvertisedClusterResources(); } private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) { if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes) logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster + ", downscaling to " + actualNodes + " nodes in " + zone.environment()); } private boolean hasQuota(ApplicationId application, int requestedNodes) { if ( ! this.zone.system().isPublic()) return true; if (application.tenant().value().hashCode() == 3857) return requestedNodes <= 60; if (application.tenant().value().hashCode() == -1271827001) return requestedNodes <= 75; return requestedNodes <= tenantNodeQuota.with(FetchVector.Dimension.APPLICATION_ID, application.tenant().value()).value(); } private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) { nodes.sort(Comparator.comparingInt(node -> node.allocation().get().membership().index())); List<HostSpec> hosts = new ArrayList<>(nodes.size()); for (Node node : nodes) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " - " + node.flavor()); Allocation nodeAllocation = node.allocation().orElseThrow(IllegalStateException::new); hosts.add(new HostSpec(node.hostname(), nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository), node.flavor().resources(), requestedResources, nodeAllocation.membership(), node.status().vespaVersion(), nodeAllocation.networkPorts(), node.status().dockerImage())); if (nodeAllocation.networkPorts().isPresent()) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " has port allocations"); } } return hosts; } private void 
validate(Collection<HostSpec> hosts) { for (HostSpec host : hosts) { if (host.membership().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host); if (host.membership().get().cluster().group().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host); } } }
Agree, the API is not ideal.
/**
 * Applies {@code action} to every node accepted by {@code filter}, holding the appropriate lock
 * while doing so: the owning application's lock for allocated nodes, the unallocated-nodes lock
 * otherwise. Each node is re-read from the database under its lock before the action is applied,
 * and nodes that have disappeared since the initial (unlocked) scan are skipped.
 *
 * @param filter the filter determining the set of nodes to operate on
 * @param action the operation to perform on each (freshly read) node, given the held lock
 * @return the nodes as returned by {@code action}, i.e. in their resulting state
 */
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
    // Partition the matching nodes by which lock protects them
    List<Node> withoutAllocation = new ArrayList<>();
    ListMap<ApplicationId, Node> byOwner = new ListMap<>();
    for (Node candidate : db.readNodes()) {
        if ( ! filter.matches(candidate)) continue;
        if (candidate.allocation().isPresent())
            byOwner.put(candidate.allocation().get().owner(), candidate);
        else
            withoutAllocation.add(candidate);
    }

    List<Node> results = new ArrayList<>();

    // Unallocated nodes are all guarded by the single unallocated-nodes lock
    try (Mutex lock = lockUnallocated()) {
        for (Node candidate : withoutAllocation) {
            Optional<Node> fresh = db.readNode(candidate.hostname());
            if (fresh.isEmpty()) continue; // node removed since the scan
            results.add(action.apply(fresh.get(), lock));
        }
    }

    // Allocated nodes are guarded by their owning application's lock
    for (Map.Entry<ApplicationId, List<Node>> ownerAndNodes : byOwner.entrySet()) {
        try (Mutex lock = lock(ownerAndNodes.getKey())) {
            for (Node candidate : ownerAndNodes.getValue()) {
                Optional<Node> fresh = db.readNode(candidate.hostname());
                if (fresh.isEmpty()) continue; // node removed since the scan
                results.add(action.apply(fresh.get(), lock));
            }
        }
    }
    return results;
}
Optional<Node> currentNode = db.readNode(node.hostname());
/**
 * Performs an operation on all nodes matching the given filter, taking the appropriate lock for
 * each node: the owning application's lock for allocated nodes, the unallocated-nodes lock for
 * the rest. Each node is re-read under the held lock, and nodes that no longer exist are skipped.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform, given the node (freshly read under the lock) and the lock
 * @return the nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
    // Group matching nodes by the lock that must be held while acting on them
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : db.readNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }

    List<Node> resultingNodes = new ArrayList<>();
    // Unallocated nodes: single shared lock
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes) {
            // Re-read under lock; the node may have changed or vanished since the unlocked scan
            Optional<Node> currentNode = db.readNode(node.hostname());
            if (currentNode.isEmpty()) continue;
            resultingNodes.add(action.apply(currentNode.get(), lock));
        }
    }
    // Allocated nodes: one lock per owning application
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue()) {
                // Re-read under lock; the node may have changed or vanished since the unlocked scan
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
    }
    return resultingNodes;
}
/**
 * The central repository of nodes, backed by a curator (ZooKeeper) database. All node state
 * transitions and node reads/writes in this module go through this class, which also hands out
 * the locks that guard those transitions.
 */
class NodeRepository extends AbstractComponent {

    private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

    private final CuratorDatabaseClient db;
    private final Clock clock;
    private final Zone zone;
    private final NodeFlavors flavors;
    private final HostResourcesCalculator resourcesCalculator;
    private final NameResolver nameResolver;
    private final OsVersions osVersions;
    private final InfrastructureVersions infrastructureVersions;
    private final FirmwareChecks firmwareChecks;
    private final DockerImages dockerImages;
    private final JobControl jobControl;
    private final Applications applications;
    private final boolean canProvisionHosts;
    private final int spareCount;

    /**
     * Creates a node repository from a zookeeper provider.
     * This will use the system time to make time-sensitive decisions
     */
    @Inject
    public NodeRepository(NodeRepositoryConfig config,
                          NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Zone zone,
                          FlagSource flagSource) {
        this(flavors,
             provisionServiceProvider.getHostResourcesCalculator(),
             curator,
             Clock.systemUTC(),
             zone,
             new DnsNameResolver(),
             DockerImage.fromString(config.dockerImage()),
             flagSource,
             config.useCuratorClientCache(),
             provisionServiceProvider.getHostProvisioner().isPresent(),
             // Keep one spare in production zones without dynamic host provisioning
             zone.environment().isProduction() && provisionServiceProvider.getHostProvisioner().isEmpty() ? 1 : 0,
             config.nodeCacheSize());
    }

    /**
     * Creates a node repository from a zookeeper provider and a clock instance
     * which will be used for time-sensitive decisions.
     */
    public NodeRepository(NodeFlavors flavors,
                          HostResourcesCalculator resourcesCalculator,
                          Curator curator,
                          Clock clock,
                          Zone zone,
                          NameResolver nameResolver,
                          DockerImage dockerImage,
                          FlagSource flagSource,
                          boolean useCuratorClientCache,
                          boolean canProvisionHosts,
                          int spareCount,
                          long nodeCacheSize) {
        this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
        this.zone = zone;
        this.clock = clock;
        this.flavors = flavors;
        this.resourcesCalculator = resourcesCalculator;
        this.nameResolver = nameResolver;
        this.osVersions = new OsVersions(this);
        this.infrastructureVersions = new InfrastructureVersions(db);
        this.firmwareChecks = new FirmwareChecks(db, clock);
        this.dockerImages = new DockerImages(db, dockerImage);
        this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
        this.applications = new Applications(db);
        this.canProvisionHosts = canProvisionHosts;
        this.spareCount = spareCount;
        rewriteNodes();
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    private void rewriteNodes() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (State state : State.values()) {
            List<Node> nodes = db.readNodes(state);
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /** Returns the curator database client used by this */
    public CuratorDatabaseClient database() { return db; }

    /** Returns the Docker image to use for given node */
    public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node.type()); }

    /** @return The name resolver used to resolve hostname and ip addresses */
    public NameResolver nameResolver() { return nameResolver; }

    /** Returns the OS versions to use for nodes in this */
    public OsVersions osVersions() { return osVersions; }

    /** Returns the infrastructure versions to use for nodes in this */
    public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

    /** Returns the status of firmware checks for hosts managed by this. */
    public FirmwareChecks firmwareChecks() { return firmwareChecks; }

    /** Returns the docker images to use for nodes in this. */
    public DockerImages dockerImages() { return dockerImages; }

    /** Returns the status of maintenance jobs managed by this. */
    public JobControl jobControl() { return jobControl; }

    /** Returns this node repo's view of the applications deployed to it */
    public Applications applications() { return applications; }

    public NodeFlavors flavors() { return flavors; }

    public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

    /** The number of nodes we should ensure has free capacity for node failures whenever possible */
    public int spareCount() { return spareCount; }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); }

    /**
     * Returns all nodes in any of the given states.
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(State ... inState) {
        return new ArrayList<>(db.readNodes(inState));
    }

    /**
     * Finds and returns the nodes of the given type in any of the given states.
     *
     * @param type the node type to return
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(NodeType type, State ... inState) {
        return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
    }

    /** Returns a filterable list of nodes in this repository in any of the given states */
    public NodeList list(State ... inState) {
        return NodeList.copyOf(getNodes(inState));
    }

    /** Returns a filterable list of all nodes of an application */
    public NodeList list(ApplicationId application) {
        return NodeList.copyOf(getNodes(application));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(getNodes(), lock);
    }

    /** Returns a filterable list of all load balancers in this repository */
    public LoadBalancerList loadBalancers() {
        return loadBalancers((ignored) -> true);
    }

    /** Returns a filterable list of load balancers belonging to given application */
    public LoadBalancerList loadBalancers(ApplicationId application) {
        return loadBalancers((id) -> id.application().equals(application));
    }

    private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
        return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
    }

    public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }
    public List<Node> getInactive() { return db.readNodes(State.inactive); }
    public List<Node> getFailed() { return db.readNodes(State.failed); }

    /**
     * Returns the ACL for the node (trusted nodes, networks and ports)
     */
    private NodeAcl getNodeAcl(Node node, NodeList candidates) {
        Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
        Set<Integer> trustedPorts = new LinkedHashSet<>();
        Set<String> trustedNetworks = new LinkedHashSet<>();

        // SSH is always trusted
        trustedPorts.add(22);

        // A node should trust its parent host
        candidates.parentOf(node).ifPresent(trustedNodes::add);

        // A node should trust all nodes of its application, and the networks of its load balancers
        node.allocation().ifPresent(allocation -> {
            trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
            loadBalancers(allocation.owner()).asList().stream()
                                             .map(LoadBalancer::instance)
                                             .map(LoadBalancerInstance::networks)
                                             .forEach(trustedNetworks::addAll);
        });

        // Additional trust per node type
        switch (node.type()) {
            case tenant:
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
                node.allocation().ifPresent(allocation ->
                        trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
                if (node.state() == State.ready) {
                    trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
                }
                break;

            case config:
                trustedNodes.addAll(candidates.asList());
                trustedPorts.add(4443);
                break;

            case proxy:
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedPorts.add(443);
                trustedPorts.add(4080);
                trustedPorts.add(4443);
                break;

            case controller:
                trustedPorts.add(4443);
                trustedPorts.add(443);
                trustedPorts.add(80);
                break;

            default:
                illegal("Don't know how to create ACL for " + node + " of type " + node.type());
        }

        return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
    }

    /**
     * Creates a list of node ACLs which identify which nodes the given node should trust
     *
     * @param node Node for which to generate ACLs
     * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
     * @return List of node ACLs
     */
    public List<NodeAcl> getNodeAcls(Node node, boolean children) {
        NodeList candidates = list();
        if (children) {
            return candidates.childrenOf(node).asList().stream()
                             .map(childNode -> getNodeAcl(childNode, candidates))
                             .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
        }
        return Collections.singletonList(getNodeAcl(node, candidates));
    }

    /** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
    public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname,
                           Flavor flavor, Optional<TenantName> reservedTo, NodeType type) {
        if (ipConfig.primary().isEmpty()) // TODO: Remove this. Only test code hits this path
            ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname));
        return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type, Optional.empty());
    }

    public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
        return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type);
    }

    /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
    public List<Node> addDockerNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
                illegal("Cannot add " + node + ": This is not a docker node");
            if ( ! node.allocation().isPresent())
                illegal("Cannot add " + node + ": Docker containers needs to be allocated");
            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd =  new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates in the argument list itself
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = getNode(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Merge history and status from the deprovisioned predecessor
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    nodesToRemove.add(existing.get());
                }

                nodesToAdd.add(node);
            }
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
            db.removeNodes(nodesToRemove);
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != State.provisioned && node.state() != State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        return node.withWantToRetire(false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());

            return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));

        if (nodeToReady.state() == State.ready) return nodeToReady;
        return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
                                             .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /** Deactivate nodes owned by application guarded by given lock */
    public void deactivate(NestedTransaction transaction, ProvisionLock lock) {
        deactivate(db.readNodes(lock.application(), State.reserved, State.active), transaction, lock);
        applications.remove(lock.application(), transaction, lock);
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction, @SuppressWarnings("unused") ProvisionLock lock) {
        return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
    }

    /**
     * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     *
     * @throws IllegalArgumentException if the node has hardware failure
     */
    public Node setDirty(Node node, Agent agent, String reason) {
        return db.writeTo(State.dirty, node, agent, Optional.of(reason));
    }

    public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = getNode(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
        // Hosts are dirtied together with all their children
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                        Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                        Stream.of(nodeToDirty))
                .filter(node -> node.state() != State.dirty)
                .collect(Collectors.toList());
        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != State.provisioned)
                .filter(node -> node.state() != State.failed)
                .filter(node -> node.state() != State.parked)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]");
        return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.failed, agent, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     *
     * @return List of all the failed nodes in their new state
     */
    public List<Node> failRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.active, agent, Optional.of(reason));
    }

    private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child, toState, agent, reason))
                                 .collect(Collectors.toList());

        moved.add(move(hostname, true, toState, agent, reason));
        return moved;
    }

    private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));

        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }

        return move(node, toState, agent, reason);
    }

    private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
        if (toState == Node.State.active && node.allocation().isEmpty())
            illegal("Could not set " + node + " active. It has no allocation.");

        try (Mutex lock = lock(node)) {
            if (toState == State.active) {
                // Guard against two active nodes occupying the same cluster slot
                for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            return db.writeTo(toState, node, agent, reason);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != State.dirty)
                illegal("Cannot make " + node  + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == State.ready) return node;

        Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(Collections.singletonList(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);

            if (node.type().isHost()) {
                List<Node> children = list().childrenOf(node).asList();
                children.forEach(child -> requireRemovable(child, true, force));
                db.removeNodes(children);
                List<Node> removed = new ArrayList<>(children);
                // Dynamically provisioned hosts (and non-"host" host types) are removed outright;
                // statically provisioned hosts are kept around as deprovisioned
                if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                    db.removeNodes(List.of(node));
                else {
                    node = node.with(IP.Config.EMPTY);
                    move(node, State.deprovisioned, Agent.system, Optional.empty());
                }
                removed.add(node);
                return removed;
            }
            else {
                List<Node> removed = List.of(node);
                db.removeNodes(removed);
                return removed;
            }
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        db.removeNodes(List.of(node));
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node: node is unallocated
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *      If only removing the container node: node in state ready
     *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent())
            illegal(node + " is currently allocated and cannot be removed");

        if (!node.type().isHost() && !removingAsChild) {
            if (node.state() != State.ready)
                illegal(node + " can not be removed as it is not in the state " + State.ready);
        }
        else if (!node.type().isHost()) { // removing a child node
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
            if ( ! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
        else { // a host
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
            if (! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching the filter.
     *
     * @return the nodes in their new state.
     */
    public List<Node> restart(NodeFilter filter) {
        return performOn(StateFilter.from(State.active, filter),
                         (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                               lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state.
     */
    public List<Node> reboot(NodeFilter filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state.
     */
    public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock Already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    /** Returns whether the given host can, in its current state, receive new tenant node allocations */
    public boolean canAllocateTenantNodeTo(Node host) {
        if ( ! host.type().canRun(NodeType.tenant)) return false;
        if (host.status().wantToRetire()) return false;
        if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
        // When hosts are provisioned on demand, nodes may be allocated to hosts that are not yet active
        if ( canProvisionHosts())
            return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
        else
            return host.state() == State.active;
    }

    /** Returns whether this repository can provision hosts on demand */
    public boolean canProvisionHosts() {
        return canProvisionHosts;
    }

    /** Returns the time keeper of this system */
    public Clock clock() { return clock; }

    /** Returns the zone of this system */
    public Zone zone() { return zone; }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Acquires the appropriate lock for this node */
    public Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private void illegal(String message) {
        throw new IllegalArgumentException(message);
    }

}
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final DockerImages dockerImages; private final JobControl jobControl; private final Applications applications; private final boolean canProvisionHosts; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider.getHostResourcesCalculator(), curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), flagSource, config.useCuratorClientCache(), provisionServiceProvider.getHostProvisioner().isPresent(), zone.environment().isProduction() && provisionServiceProvider.getHostProvisioner().isEmpty() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, HostResourcesCalculator resourcesCalculator, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage dockerImage, FlagSource flagSource, boolean useCuratorClientCache, boolean canProvisionHosts, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = resourcesCalculator; this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.dockerImages = new DockerImages(db, dockerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.canProvisionHosts = canProvisionHosts; this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** Returns the Docker image to use for given node */ public DockerImage dockerImage(Node node) { return dockerImages.dockerImageFor(node.type()); } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the 
infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public DockerImages dockerImages() { return dockerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. 
If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); } public List<Node> getNodes(ApplicationId id, State ... 
inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g. 
containers on a Docker host) * @return List of node ACLs */ public List<NodeAcl> getNodeAcls(Node node, boolean children) { NodeList candidates = list(); if (children) { return candidates.childrenOf(node).asList().stream() .map(childNode -> getNodeAcl(childNode, candidates)) .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } return Collections.singletonList(getNodeAcl(node, candidates)); } /** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */ public Node createNode(String openStackId, String hostname, IP.Config ipConfig, Optional<String> parentHostname, Flavor flavor, Optional<TenantName> reservedTo, NodeType type) { if (ipConfig.primary().isEmpty()) ipConfig = ipConfig.with(nameResolver.getAllByNameOrThrow(hostname)); return Node.create(openStackId, ipConfig, hostname, parentHostname, Optional.empty(), flavor, reservedTo, type, Optional.empty()); } public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) { return createNode(openStackId, hostname, IP.Config.EMPTY, parentHostname, flavor, Optional.empty(), type); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if ( ! node.allocation().isPresent()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). 
Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. */ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. 
It is not provisioned or dirty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** Deactivate nodes owned by application guarded by given lock */ public void deactivate(NestedTransaction transaction, ProvisionLock lock) { deactivate(db.readNodes(lock.application(), State.reserved, State.active), transaction, lock); applications.remove(lock.application(), transaction, lock); } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. 
*/ public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction, @SuppressWarnings("unused") ProvisionLock lock) { return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction); } /** Move nodes to the dirty state */ public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason)); } /** * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. * * @throws IllegalArgumentException if the node has hardware failure */ public Node setDirty(Node node, Agent agent, String reason) { return db.writeTo(State.dirty, node, agent, Optional.of(reason)); } public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked]"); return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList()); } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason)); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, node, agent, reason); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. 
*/ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(Collections.singletonList(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. * * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); List<Node> removed = new ArrayList<>(children); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** 
Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } /** * Throws if the given node cannot be removed. Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state. */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * @return the nodes in their new state. 
*/ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state. */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock Already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ public boolean canAllocateTenantNodeTo(Node host) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if ( canProvisionHosts()) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns whether this repository can provision hosts on demand */ public boolean canProvisionHosts() { return canProvisionHosts; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Acquires the appropriate lock for this node */ public Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
I think we should allow specifying docker image without registry
public static DockerImage fromString(String s) { if (s.isEmpty()) return EMPTY; int firstPathSeparator = s.indexOf('/'); if (firstPathSeparator < 0) throw new IllegalArgumentException("Missing path separator in '" + s + "'"); String registry = s.substring(0, firstPathSeparator); String repository = s.substring(firstPathSeparator + 1); if (repository.isEmpty()) throw new IllegalArgumentException("Repository must be non-empty in '" + s + "'"); int tagStart = repository.indexOf(':'); if (tagStart < 0) return new DockerImage(registry, repository, Optional.empty()); String tag = repository.substring(tagStart + 1); repository = repository.substring(0, tagStart); return new DockerImage(registry, repository, Optional.of(tag)); }
if (firstPathSeparator < 0) throw new IllegalArgumentException("Missing path separator in '" + s + "'");
public static DockerImage fromString(String s) { if (s.isEmpty()) return EMPTY; int firstPathSeparator = s.indexOf('/'); if (firstPathSeparator < 0) throw new IllegalArgumentException("Missing path separator in '" + s + "'"); String registry = s.substring(0, firstPathSeparator); String repository = s.substring(firstPathSeparator + 1); if (repository.isEmpty()) throw new IllegalArgumentException("Repository must be non-empty in '" + s + "'"); int tagStart = repository.indexOf(':'); if (tagStart < 0) return new DockerImage(registry, repository, Optional.empty()); String tag = repository.substring(tagStart + 1); repository = repository.substring(0, tagStart); return new DockerImage(registry, repository, Optional.of(tag)); }
class DockerImage { public static final DockerImage EMPTY = new DockerImage("", "", Optional.empty()); private final String registry; private final String repository; private final Optional<String> tag; DockerImage(String registry, String repository, Optional<String> tag) { this.registry = Objects.requireNonNull(registry, "registry must be non-null"); this.repository = Objects.requireNonNull(repository, "repository must be non-null"); this.tag = Objects.requireNonNull(tag, "tag must be non-null"); } /** Returns the registry-part of this, i.e. the host/port of the registry. */ public String registry() { return registry; } /** Returns the repository-part of this */ public String repository() { return repository; } /** Returns the registry and repository for this image, excluding its tag */ public String untagged() { return new DockerImage(registry, repository, Optional.empty()).asString(); } /** Returns this image's tag, if any */ public Optional<String> tag() { return tag; } /** Returns the tag as a {@link Version}, {@link Version public Version tagAsVersion() { return tag.map(Version::new).orElse(Version.emptyVersion); } /** Returns a copy of this tagged with the given version */ public DockerImage withTag(Version version) { return new DockerImage(registry, repository, Optional.of(version.toFullString())); } /** Returns a copy of this with registry set to given value */ public DockerImage withRegistry(String registry) { return new DockerImage(registry, repository, tag); } public String asString() { if (equals(EMPTY)) return ""; return registry + "/" + repository + tag.map(t -> ':' + t).orElse(""); } @Override public String toString() { return asString(); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DockerImage that = (DockerImage) o; return registry.equals(that.registry) && repository.equals(that.repository) && tag.equals(that.tag); } @Override public int hashCode() { return 
Objects.hash(registry, repository, tag); } public static DockerImage from(String registry, String repository) { return new DockerImage(registry, repository, Optional.empty()); } }
class DockerImage { public static final DockerImage EMPTY = new DockerImage("", "", Optional.empty()); private final String registry; private final String repository; private final Optional<String> tag; DockerImage(String registry, String repository, Optional<String> tag) { this.registry = Objects.requireNonNull(registry, "registry must be non-null"); this.repository = Objects.requireNonNull(repository, "repository must be non-null"); this.tag = Objects.requireNonNull(tag, "tag must be non-null"); } /** Returns the registry-part of this, i.e. the host/port of the registry. */ public String registry() { return registry; } /** Returns the repository-part of this */ public String repository() { return repository; } /** Returns the registry and repository for this image, excluding its tag */ public String untagged() { return new DockerImage(registry, repository, Optional.empty()).asString(); } /** Returns this image's tag, if any */ public Optional<String> tag() { return tag; } /** Returns the tag as a {@link Version}, {@link Version public Version tagAsVersion() { return tag.map(Version::new).orElse(Version.emptyVersion); } /** Returns a copy of this tagged with the given version */ public DockerImage withTag(Version version) { return new DockerImage(registry, repository, Optional.of(version.toFullString())); } /** Returns a copy of this with registry set to given value */ public DockerImage withRegistry(String registry) { return new DockerImage(registry, repository, tag); } public String asString() { if (equals(EMPTY)) return ""; return registry + "/" + repository + tag.map(t -> ':' + t).orElse(""); } @Override public String toString() { return asString(); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DockerImage that = (DockerImage) o; return registry.equals(that.registry) && repository.equals(that.repository) && tag.equals(that.tag); } @Override public int hashCode() { return 
Objects.hash(registry, repository, tag); } public static DockerImage from(String registry, String repository) { return new DockerImage(registry, repository, Optional.empty()); } }
Explicit is better I think. Won't the default depend on the container engine? I believe Docker uses hub.docker.com without registry, but that might not be the case for e.g. Podman?
public static DockerImage fromString(String s) { if (s.isEmpty()) return EMPTY; int firstPathSeparator = s.indexOf('/'); if (firstPathSeparator < 0) throw new IllegalArgumentException("Missing path separator in '" + s + "'"); String registry = s.substring(0, firstPathSeparator); String repository = s.substring(firstPathSeparator + 1); if (repository.isEmpty()) throw new IllegalArgumentException("Repository must be non-empty in '" + s + "'"); int tagStart = repository.indexOf(':'); if (tagStart < 0) return new DockerImage(registry, repository, Optional.empty()); String tag = repository.substring(tagStart + 1); repository = repository.substring(0, tagStart); return new DockerImage(registry, repository, Optional.of(tag)); }
if (firstPathSeparator < 0) throw new IllegalArgumentException("Missing path separator in '" + s + "'");
public static DockerImage fromString(String s) { if (s.isEmpty()) return EMPTY; int firstPathSeparator = s.indexOf('/'); if (firstPathSeparator < 0) throw new IllegalArgumentException("Missing path separator in '" + s + "'"); String registry = s.substring(0, firstPathSeparator); String repository = s.substring(firstPathSeparator + 1); if (repository.isEmpty()) throw new IllegalArgumentException("Repository must be non-empty in '" + s + "'"); int tagStart = repository.indexOf(':'); if (tagStart < 0) return new DockerImage(registry, repository, Optional.empty()); String tag = repository.substring(tagStart + 1); repository = repository.substring(0, tagStart); return new DockerImage(registry, repository, Optional.of(tag)); }
class DockerImage { public static final DockerImage EMPTY = new DockerImage("", "", Optional.empty()); private final String registry; private final String repository; private final Optional<String> tag; DockerImage(String registry, String repository, Optional<String> tag) { this.registry = Objects.requireNonNull(registry, "registry must be non-null"); this.repository = Objects.requireNonNull(repository, "repository must be non-null"); this.tag = Objects.requireNonNull(tag, "tag must be non-null"); } /** Returns the registry-part of this, i.e. the host/port of the registry. */ public String registry() { return registry; } /** Returns the repository-part of this */ public String repository() { return repository; } /** Returns the registry and repository for this image, excluding its tag */ public String untagged() { return new DockerImage(registry, repository, Optional.empty()).asString(); } /** Returns this image's tag, if any */ public Optional<String> tag() { return tag; } /** Returns the tag as a {@link Version}, {@link Version public Version tagAsVersion() { return tag.map(Version::new).orElse(Version.emptyVersion); } /** Returns a copy of this tagged with the given version */ public DockerImage withTag(Version version) { return new DockerImage(registry, repository, Optional.of(version.toFullString())); } /** Returns a copy of this with registry set to given value */ public DockerImage withRegistry(String registry) { return new DockerImage(registry, repository, tag); } public String asString() { if (equals(EMPTY)) return ""; return registry + "/" + repository + tag.map(t -> ':' + t).orElse(""); } @Override public String toString() { return asString(); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DockerImage that = (DockerImage) o; return registry.equals(that.registry) && repository.equals(that.repository) && tag.equals(that.tag); } @Override public int hashCode() { return 
Objects.hash(registry, repository, tag); } public static DockerImage from(String registry, String repository) { return new DockerImage(registry, repository, Optional.empty()); } }
class DockerImage { public static final DockerImage EMPTY = new DockerImage("", "", Optional.empty()); private final String registry; private final String repository; private final Optional<String> tag; DockerImage(String registry, String repository, Optional<String> tag) { this.registry = Objects.requireNonNull(registry, "registry must be non-null"); this.repository = Objects.requireNonNull(repository, "repository must be non-null"); this.tag = Objects.requireNonNull(tag, "tag must be non-null"); } /** Returns the registry-part of this, i.e. the host/port of the registry. */ public String registry() { return registry; } /** Returns the repository-part of this */ public String repository() { return repository; } /** Returns the registry and repository for this image, excluding its tag */ public String untagged() { return new DockerImage(registry, repository, Optional.empty()).asString(); } /** Returns this image's tag, if any */ public Optional<String> tag() { return tag; } /** Returns the tag as a {@link Version}, {@link Version public Version tagAsVersion() { return tag.map(Version::new).orElse(Version.emptyVersion); } /** Returns a copy of this tagged with the given version */ public DockerImage withTag(Version version) { return new DockerImage(registry, repository, Optional.of(version.toFullString())); } /** Returns a copy of this with registry set to given value */ public DockerImage withRegistry(String registry) { return new DockerImage(registry, repository, tag); } public String asString() { if (equals(EMPTY)) return ""; return registry + "/" + repository + tag.map(t -> ':' + t).orElse(""); } @Override public String toString() { return asString(); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DockerImage that = (DockerImage) o; return registry.equals(that.registry) && repository.equals(that.repository) && tag.equals(that.tag); } @Override public int hashCode() { return 
Objects.hash(registry, repository, tag); } public static DockerImage from(String registry, String repository) { return new DockerImage(registry, repository, Optional.empty()); } }
I don't like the spec here. _How much_ can you write without blocking? Generally, this should be a `while` loop, but since you only do a single write here, and then close, I guess this is fine.
public void run() { StatusResponse statusResponse = target.requestStatusHtml(); servletResponse.setStatus(statusResponse.statusCode); if (statusResponse.contentType != null) { servletResponse.setHeader("Content-Type", statusResponse.contentType); } output.setWriteListener(new WriteListener() { @Override public void onWritePossible() throws IOException { if (output.isReady()) { if (statusResponse.content != null) { output.write(statusResponse.content); } asyncContext.complete(); } } @Override public void onError(Throwable t) { log.log(Level.FINE, t, () -> "Failed to write status response: " + t.getMessage()); asyncContext.complete(); } }); }
asyncContext.complete();
public void run() { StatusResponse statusResponse = target.requestStatusHtml(); servletResponse.setStatus(statusResponse.statusCode); if (statusResponse.contentType != null) { servletResponse.setHeader("Content-Type", statusResponse.contentType); } output.setWriteListener(new WriteListener() { @Override public void onWritePossible() throws IOException { if (output.isReady()) { if (statusResponse.content != null) { output.write(statusResponse.content); } asyncContext.complete(); } } @Override public void onError(Throwable t) { log.log(Level.FINE, t, () -> "Failed to write status response: " + t.getMessage()); asyncContext.complete(); } }); }
class ProxyRequestTask implements Runnable { final AsyncContext asyncContext; final ProxyTarget target; final HttpServletResponse servletResponse; final ServletOutputStream output; ProxyRequestTask(AsyncContext asyncContext, ProxyTarget target, HttpServletResponse servletResponse, ServletOutputStream output) { this.asyncContext = asyncContext; this.target = target; this.servletResponse = servletResponse; this.output = output; } @Override }
class ProxyRequestTask implements Runnable { final AsyncContext asyncContext; final ProxyTarget target; final HttpServletResponse servletResponse; final ServletOutputStream output; ProxyRequestTask(AsyncContext asyncContext, ProxyTarget target, HttpServletResponse servletResponse, ServletOutputStream output) { this.asyncContext = asyncContext; this.target = target; this.servletResponse = servletResponse; this.output = output; } @Override }
```suggestion return lastResponse = getStatusResponse(); ``` I find this easier to read, but personal preference I guess :)
StatusResponse requestStatusHtml() { StatusResponse response = lastResponse; if (response != null && !response.isExpired()) { return response; } StatusResponse statusResponse = getStatusResponse(); lastResponse = statusResponse; return statusResponse; }
return statusResponse;
StatusResponse requestStatusHtml() { StatusResponse response = lastResponse; if (response != null && !response.isExpired()) { return response; } return this.lastResponse = getStatusResponse(); }
class ProxyTarget implements AutoCloseable { final int port; final Duration timeout; final SslContextFactory.Server sslContextFactory; volatile CloseableHttpClient client; volatile StatusResponse lastResponse; ProxyTarget(int port, Duration timeout, SslContextFactory.Server sslContextFactory) { this.port = port; this.timeout = timeout; this.sslContextFactory = sslContextFactory; } private StatusResponse getStatusResponse() { try (CloseableHttpResponse clientResponse = client().execute(new HttpGet("https: int statusCode = clientResponse.getStatusLine().getStatusCode(); HttpEntity entity = clientResponse.getEntity(); if (entity != null) { Header contentTypeHeader = entity.getContentType(); String contentType = contentTypeHeader != null ? contentTypeHeader.getValue() : null; byte[] content = EntityUtils.toByteArray(entity); return new StatusResponse(statusCode, contentType, content); } else { return new StatusResponse(statusCode, null, null); } } catch (Exception e) { log.log(Level.FINE, e, () -> "Proxy request failed" + e.getMessage()); return new StatusResponse(500, "text/plain", e.getMessage().getBytes()); } } private CloseableHttpClient client() { if (client == null) { synchronized (this) { if (client == null) { int timeoutMillis = (int) timeout.toMillis(); client = HttpClientBuilder.create() .disableAutomaticRetries() .setMaxConnPerRoute(4) .setSSLContext(getSslContext(sslContextFactory)) .setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE) .setUserTokenHandler(context -> null) .setUserAgent("health-check-proxy-client") .setDefaultRequestConfig( RequestConfig.custom() .setConnectTimeout(timeoutMillis) .setConnectionRequestTimeout(timeoutMillis) .setSocketTimeout(timeoutMillis) .build()) .build(); } } } return client; } private SSLContext getSslContext(SslContextFactory.Server sslContextFactory) { if (sslContextFactory.getNeedClientAuth()) { log.info(String.format("Port %d requires client certificate - client will provide its node certificate", port)); 
TransportSecurityOptions options = TransportSecurityUtils.getOptions() .orElseThrow(() -> new IllegalStateException("Vespa TLS configuration is required when using health check proxy to a port with client auth 'need'")); return new SslContextBuilder() .withKeyStore(options.getPrivateKeyFile().get(), options.getCertificatesFile().get()) .withTrustManager(new TrustAllX509TrustManager()) .build(); } else { log.info(String.format( "Port %d does not require a client certificate - client will not provide a certificate", port)); return new SslContextBuilder() .withTrustManager(new TrustAllX509TrustManager()) .build(); } } @Override public void close() throws IOException { synchronized (this) { if (client != null) { client.close(); client = null; } } } }
class ProxyTarget implements AutoCloseable { final int port; final Duration timeout; final SslContextFactory.Server sslContextFactory; volatile CloseableHttpClient client; volatile StatusResponse lastResponse; ProxyTarget(int port, Duration timeout, SslContextFactory.Server sslContextFactory) { this.port = port; this.timeout = timeout; this.sslContextFactory = sslContextFactory; } private StatusResponse getStatusResponse() { try (CloseableHttpResponse clientResponse = client().execute(new HttpGet("https: int statusCode = clientResponse.getStatusLine().getStatusCode(); HttpEntity entity = clientResponse.getEntity(); if (entity != null) { Header contentTypeHeader = entity.getContentType(); String contentType = contentTypeHeader != null ? contentTypeHeader.getValue() : null; byte[] content = EntityUtils.toByteArray(entity); return new StatusResponse(statusCode, contentType, content); } else { return new StatusResponse(statusCode, null, null); } } catch (Exception e) { log.log(Level.FINE, e, () -> "Proxy request failed" + e.getMessage()); return new StatusResponse(500, "text/plain", e.getMessage().getBytes()); } } private CloseableHttpClient client() { if (client == null) { synchronized (this) { if (client == null) { int timeoutMillis = (int) timeout.toMillis(); client = HttpClientBuilder.create() .disableAutomaticRetries() .setMaxConnPerRoute(4) .setSSLContext(getSslContext(sslContextFactory)) .setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE) .setUserTokenHandler(context -> null) .setUserAgent("health-check-proxy-client") .setDefaultRequestConfig( RequestConfig.custom() .setConnectTimeout(timeoutMillis) .setConnectionRequestTimeout(timeoutMillis) .setSocketTimeout(timeoutMillis) .build()) .build(); } } } return client; } private SSLContext getSslContext(SslContextFactory.Server sslContextFactory) { if (sslContextFactory.getNeedClientAuth()) { log.info(String.format("Port %d requires client certificate - client will provide its node certificate", port)); 
TransportSecurityOptions options = TransportSecurityUtils.getOptions() .orElseThrow(() -> new IllegalStateException("Vespa TLS configuration is required when using health check proxy to a port with client auth 'need'")); return new SslContextBuilder() .withKeyStore(options.getPrivateKeyFile().get(), options.getCertificatesFile().get()) .withTrustManager(new TrustAllX509TrustManager()) .build(); } else { log.info(String.format( "Port %d does not require a client certificate - client will not provide a certificate", port)); return new SslContextBuilder() .withTrustManager(new TrustAllX509TrustManager()) .build(); } } @Override public void close() throws IOException { synchronized (this) { if (client != null) { client.close(); client = null; } } } }
Yes, it's a bad interface. The servlet has no clue on how much data to produce. This method typically implemented as a `while` loop if the servlet had multiple byte buffers to write.
public void run() { StatusResponse statusResponse = target.requestStatusHtml(); servletResponse.setStatus(statusResponse.statusCode); if (statusResponse.contentType != null) { servletResponse.setHeader("Content-Type", statusResponse.contentType); } output.setWriteListener(new WriteListener() { @Override public void onWritePossible() throws IOException { if (output.isReady()) { if (statusResponse.content != null) { output.write(statusResponse.content); } asyncContext.complete(); } } @Override public void onError(Throwable t) { log.log(Level.FINE, t, () -> "Failed to write status response: " + t.getMessage()); asyncContext.complete(); } }); }
asyncContext.complete();
public void run() { StatusResponse statusResponse = target.requestStatusHtml(); servletResponse.setStatus(statusResponse.statusCode); if (statusResponse.contentType != null) { servletResponse.setHeader("Content-Type", statusResponse.contentType); } output.setWriteListener(new WriteListener() { @Override public void onWritePossible() throws IOException { if (output.isReady()) { if (statusResponse.content != null) { output.write(statusResponse.content); } asyncContext.complete(); } } @Override public void onError(Throwable t) { log.log(Level.FINE, t, () -> "Failed to write status response: " + t.getMessage()); asyncContext.complete(); } }); }
class ProxyRequestTask implements Runnable { final AsyncContext asyncContext; final ProxyTarget target; final HttpServletResponse servletResponse; final ServletOutputStream output; ProxyRequestTask(AsyncContext asyncContext, ProxyTarget target, HttpServletResponse servletResponse, ServletOutputStream output) { this.asyncContext = asyncContext; this.target = target; this.servletResponse = servletResponse; this.output = output; } @Override }
class ProxyRequestTask implements Runnable { final AsyncContext asyncContext; final ProxyTarget target; final HttpServletResponse servletResponse; final ServletOutputStream output; ProxyRequestTask(AsyncContext asyncContext, ProxyTarget target, HttpServletResponse servletResponse, ServletOutputStream output) { this.asyncContext = asyncContext; this.target = target; this.servletResponse = servletResponse; this.output = output; } @Override }
Done :)
StatusResponse requestStatusHtml() { StatusResponse response = lastResponse; if (response != null && !response.isExpired()) { return response; } StatusResponse statusResponse = getStatusResponse(); lastResponse = statusResponse; return statusResponse; }
return statusResponse;
StatusResponse requestStatusHtml() { StatusResponse response = lastResponse; if (response != null && !response.isExpired()) { return response; } return this.lastResponse = getStatusResponse(); }
class ProxyTarget implements AutoCloseable { final int port; final Duration timeout; final SslContextFactory.Server sslContextFactory; volatile CloseableHttpClient client; volatile StatusResponse lastResponse; ProxyTarget(int port, Duration timeout, SslContextFactory.Server sslContextFactory) { this.port = port; this.timeout = timeout; this.sslContextFactory = sslContextFactory; } private StatusResponse getStatusResponse() { try (CloseableHttpResponse clientResponse = client().execute(new HttpGet("https: int statusCode = clientResponse.getStatusLine().getStatusCode(); HttpEntity entity = clientResponse.getEntity(); if (entity != null) { Header contentTypeHeader = entity.getContentType(); String contentType = contentTypeHeader != null ? contentTypeHeader.getValue() : null; byte[] content = EntityUtils.toByteArray(entity); return new StatusResponse(statusCode, contentType, content); } else { return new StatusResponse(statusCode, null, null); } } catch (Exception e) { log.log(Level.FINE, e, () -> "Proxy request failed" + e.getMessage()); return new StatusResponse(500, "text/plain", e.getMessage().getBytes()); } } private CloseableHttpClient client() { if (client == null) { synchronized (this) { if (client == null) { int timeoutMillis = (int) timeout.toMillis(); client = HttpClientBuilder.create() .disableAutomaticRetries() .setMaxConnPerRoute(4) .setSSLContext(getSslContext(sslContextFactory)) .setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE) .setUserTokenHandler(context -> null) .setUserAgent("health-check-proxy-client") .setDefaultRequestConfig( RequestConfig.custom() .setConnectTimeout(timeoutMillis) .setConnectionRequestTimeout(timeoutMillis) .setSocketTimeout(timeoutMillis) .build()) .build(); } } } return client; } private SSLContext getSslContext(SslContextFactory.Server sslContextFactory) { if (sslContextFactory.getNeedClientAuth()) { log.info(String.format("Port %d requires client certificate - client will provide its node certificate", port)); 
TransportSecurityOptions options = TransportSecurityUtils.getOptions() .orElseThrow(() -> new IllegalStateException("Vespa TLS configuration is required when using health check proxy to a port with client auth 'need'")); return new SslContextBuilder() .withKeyStore(options.getPrivateKeyFile().get(), options.getCertificatesFile().get()) .withTrustManager(new TrustAllX509TrustManager()) .build(); } else { log.info(String.format( "Port %d does not require a client certificate - client will not provide a certificate", port)); return new SslContextBuilder() .withTrustManager(new TrustAllX509TrustManager()) .build(); } } @Override public void close() throws IOException { synchronized (this) { if (client != null) { client.close(); client = null; } } } }
class ProxyTarget implements AutoCloseable { final int port; final Duration timeout; final SslContextFactory.Server sslContextFactory; volatile CloseableHttpClient client; volatile StatusResponse lastResponse; ProxyTarget(int port, Duration timeout, SslContextFactory.Server sslContextFactory) { this.port = port; this.timeout = timeout; this.sslContextFactory = sslContextFactory; } private StatusResponse getStatusResponse() { try (CloseableHttpResponse clientResponse = client().execute(new HttpGet("https: int statusCode = clientResponse.getStatusLine().getStatusCode(); HttpEntity entity = clientResponse.getEntity(); if (entity != null) { Header contentTypeHeader = entity.getContentType(); String contentType = contentTypeHeader != null ? contentTypeHeader.getValue() : null; byte[] content = EntityUtils.toByteArray(entity); return new StatusResponse(statusCode, contentType, content); } else { return new StatusResponse(statusCode, null, null); } } catch (Exception e) { log.log(Level.FINE, e, () -> "Proxy request failed" + e.getMessage()); return new StatusResponse(500, "text/plain", e.getMessage().getBytes()); } } private CloseableHttpClient client() { if (client == null) { synchronized (this) { if (client == null) { int timeoutMillis = (int) timeout.toMillis(); client = HttpClientBuilder.create() .disableAutomaticRetries() .setMaxConnPerRoute(4) .setSSLContext(getSslContext(sslContextFactory)) .setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE) .setUserTokenHandler(context -> null) .setUserAgent("health-check-proxy-client") .setDefaultRequestConfig( RequestConfig.custom() .setConnectTimeout(timeoutMillis) .setConnectionRequestTimeout(timeoutMillis) .setSocketTimeout(timeoutMillis) .build()) .build(); } } } return client; } private SSLContext getSslContext(SslContextFactory.Server sslContextFactory) { if (sslContextFactory.getNeedClientAuth()) { log.info(String.format("Port %d requires client certificate - client will provide its node certificate", port)); 
TransportSecurityOptions options = TransportSecurityUtils.getOptions() .orElseThrow(() -> new IllegalStateException("Vespa TLS configuration is required when using health check proxy to a port with client auth 'need'")); return new SslContextBuilder() .withKeyStore(options.getPrivateKeyFile().get(), options.getCertificatesFile().get()) .withTrustManager(new TrustAllX509TrustManager()) .build(); } else { log.info(String.format( "Port %d does not require a client certificate - client will not provide a certificate", port)); return new SslContextBuilder() .withTrustManager(new TrustAllX509TrustManager()) .build(); } } @Override public void close() throws IOException { synchronized (this) { if (client != null) { client.close(); client = null; } } } }
I don't think this one can be removed yet?
public void restart(ApplicationId application, HostFilter filter) { nodeRepository.restart(ApplicationFilter.from(application, NodeHostFilter.from(filter))); }
}
public void restart(ApplicationId application, HostFilter filter) { nodeRepository.restart(ApplicationFilter.from(application, NodeHostFilter.from(filter))); }
class NodeRepositoryProvisioner implements Provisioner { private static final Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName()); private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; private final CapacityPolicies capacityPolicies; private final Zone zone; private final Preparer preparer; private final Activator activator; private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; private final NodeResourceLimits nodeResourceLimits; private final IntFlag tenantNodeQuota; @Inject public NodeRepositoryProvisioner(NodeRepository nodeRepository, Zone zone, ProvisionServiceProvider provisionServiceProvider, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); this.capacityPolicies = new CapacityPolicies(nodeRepository); this.zone = zone; this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService(nodeRepository) .map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService, flagSource)); this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.preparer = new Preparer(nodeRepository, flagSource, provisionServiceProvider.getHostProvisioner(), loadBalancerProvisioner); this.activator = new Activator(nodeRepository, loadBalancerProvisioner); this.tenantNodeQuota = Flags.TENANT_NODE_QUOTA.bindTo(flagSource); } /** * Returns a list of nodes in the prepared or active state, matching the given constraints. * The nodes are ordered by increasing index number. */ @Override public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) { log.log(Level.FINE, () -> "Received deploy prepare request for " + requested + " for application " + application + ", cluster " + cluster); if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group"); if ( ! 
hasQuota(application, requested.maxResources().nodes())) throw new IllegalArgumentException(requested + " requested for " + cluster + ". Max value exceeds your quota. Resolve this at https: nodeResourceLimits.ensureWithinAdvertisedLimits("Min", requested.minResources().nodeResources(), cluster); nodeResourceLimits.ensureWithinAdvertisedLimits("Max", requested.maxResources().nodeResources(), cluster); int groups; NodeResources resources; NodeSpec nodeSpec; if ( requested.type() == NodeType.tenant) { ClusterResources target = decideTargetResources(application, cluster, requested); int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application); groups = Math.min(target.groups(), nodeCount); resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster); boolean exclusive = capacityPolicies.decideExclusivity(cluster.isExclusive()); nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail()); logIfDownscaled(target.nodes(), nodeCount, cluster, logger); } else { groups = 1; resources = requested.minResources().nodeResources(); nodeSpec = NodeSpec.from(requested.type()); } return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources); } @Override public void activate(Collection<HostSpec> hosts, ActivationContext context, ApplicationTransaction transaction) { validate(hosts); activator.activate(hosts, context.generation(), transaction); } @Override @Override public void remove(ApplicationTransaction transaction) { nodeRepository.deactivate(transaction); loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.deactivate(transaction)); } @Override public ProvisionLock lock(ApplicationId application) { return new ProvisionLock(application, nodeRepository.lock(application)); } /** * Returns the target cluster resources, a value between the min and max in the requested capacity, * and updates the application store with the received min and max. 
*/ private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { try (Mutex lock = nodeRepository.lock(applicationId)) { Application application = nodeRepository.applications().get(applicationId).orElse(new Application(applicationId)); application = application.withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested.minResources(), requested.maxResources()); nodeRepository.applications().put(application, lock); return application.clusters().get(clusterSpec.id()).targetResources() .orElseGet(() -> currentResources(applicationId, clusterSpec, requested)); } } /** Returns the current resources of this cluster, or the closes */ private ClusterResources currentResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { List<Node> nodes = NodeList.copyOf(nodeRepository.getNodes(applicationId, Node.State.active)) .cluster(clusterSpec.id()) .not().retired() .not().removable() .asList(); boolean firstDeployment = nodes.isEmpty(); AllocatableClusterResources currentResources = firstDeployment ? new AllocatableClusterResources(requested.minResources(), clusterSpec.type(), clusterSpec.isExclusive(), nodeRepository) : new AllocatableClusterResources(nodes, nodeRepository); return within(Limits.of(requested), clusterSpec.isExclusive(), currentResources, firstDeployment); } /** Make the minimal adjustments needed to the current resources to stay within the limits */ private ClusterResources within(Limits limits, boolean exclusive, AllocatableClusterResources current, boolean firstDeployment) { if (limits.min().equals(limits.max())) return limits.min(); var currentAsAdvertised = current.toAdvertisedClusterResources(); if (! 
firstDeployment && currentAsAdvertised.isWithin(limits.min(), limits.max())) return currentAsAdvertised; return allocationOptimizer.findBestAllocation(ResourceTarget.preserve(current), current, limits, exclusive) .orElseThrow(() -> new IllegalArgumentException("No allocation possible within " + limits)) .toAdvertisedClusterResources(); } private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) { if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes) logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster + ", downscaling to " + actualNodes + " nodes in " + zone.environment()); } private boolean hasQuota(ApplicationId application, int requestedNodes) { if ( ! this.zone.system().isPublic()) return true; if (application.tenant().value().hashCode() == 3857) return requestedNodes <= 60; if (application.tenant().value().hashCode() == -1271827001) return requestedNodes <= 75; return requestedNodes <= tenantNodeQuota.with(FetchVector.Dimension.APPLICATION_ID, application.tenant().value()).value(); } private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) { nodes.sort(Comparator.comparingInt(node -> node.allocation().get().membership().index())); List<HostSpec> hosts = new ArrayList<>(nodes.size()); for (Node node : nodes) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " - " + node.flavor()); Allocation nodeAllocation = node.allocation().orElseThrow(IllegalStateException::new); hosts.add(new HostSpec(node.hostname(), nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository), node.flavor().resources(), requestedResources, nodeAllocation.membership(), node.status().vespaVersion(), nodeAllocation.networkPorts(), node.status().dockerImage())); if (nodeAllocation.networkPorts().isPresent()) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " has port allocations"); } } return hosts; } private void 
validate(Collection<HostSpec> hosts) { for (HostSpec host : hosts) { if (host.membership().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host); if (host.membership().get().cluster().group().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host); } } }
class NodeRepositoryProvisioner implements Provisioner { private static final Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName()); private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; private final CapacityPolicies capacityPolicies; private final Zone zone; private final Preparer preparer; private final Activator activator; private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; private final NodeResourceLimits nodeResourceLimits; private final IntFlag tenantNodeQuota; @Inject public NodeRepositoryProvisioner(NodeRepository nodeRepository, Zone zone, ProvisionServiceProvider provisionServiceProvider, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); this.capacityPolicies = new CapacityPolicies(nodeRepository); this.zone = zone; this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService(nodeRepository) .map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService, flagSource)); this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.preparer = new Preparer(nodeRepository, flagSource, provisionServiceProvider.getHostProvisioner(), loadBalancerProvisioner); this.activator = new Activator(nodeRepository, loadBalancerProvisioner); this.tenantNodeQuota = Flags.TENANT_NODE_QUOTA.bindTo(flagSource); } /** * Returns a list of nodes in the prepared or active state, matching the given constraints. * The nodes are ordered by increasing index number. */ @Override public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) { log.log(Level.FINE, () -> "Received deploy prepare request for " + requested + " for application " + application + ", cluster " + cluster); if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group"); if ( ! 
hasQuota(application, requested.maxResources().nodes())) throw new IllegalArgumentException(requested + " requested for " + cluster + ". Max value exceeds your quota. Resolve this at https: nodeResourceLimits.ensureWithinAdvertisedLimits("Min", requested.minResources().nodeResources(), cluster); nodeResourceLimits.ensureWithinAdvertisedLimits("Max", requested.maxResources().nodeResources(), cluster); int groups; NodeResources resources; NodeSpec nodeSpec; if ( requested.type() == NodeType.tenant) { ClusterResources target = decideTargetResources(application, cluster, requested); int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application); groups = Math.min(target.groups(), nodeCount); resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster); boolean exclusive = capacityPolicies.decideExclusivity(cluster.isExclusive()); nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail()); logIfDownscaled(target.nodes(), nodeCount, cluster, logger); } else { groups = 1; resources = requested.minResources().nodeResources(); nodeSpec = NodeSpec.from(requested.type()); } return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources); } @Override public void activate(Collection<HostSpec> hosts, ActivationContext context, ApplicationTransaction transaction) { validate(hosts); activator.activate(hosts, context.generation(), transaction); } @Override @Override public void remove(ApplicationTransaction transaction) { nodeRepository.deactivate(transaction); loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.deactivate(transaction)); } @Override public ProvisionLock lock(ApplicationId application) { return new ProvisionLock(application, nodeRepository.lock(application)); } /** * Returns the target cluster resources, a value between the min and max in the requested capacity, * and updates the application store with the received min and max. 
*/ private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { try (Mutex lock = nodeRepository.lock(applicationId)) { Application application = nodeRepository.applications().get(applicationId).orElse(new Application(applicationId)); application = application.withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested.minResources(), requested.maxResources()); nodeRepository.applications().put(application, lock); return application.clusters().get(clusterSpec.id()).targetResources() .orElseGet(() -> currentResources(applicationId, clusterSpec, requested)); } } /** Returns the current resources of this cluster, or the closes */ private ClusterResources currentResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { List<Node> nodes = NodeList.copyOf(nodeRepository.getNodes(applicationId, Node.State.active)) .cluster(clusterSpec.id()) .not().retired() .not().removable() .asList(); boolean firstDeployment = nodes.isEmpty(); AllocatableClusterResources currentResources = firstDeployment ? new AllocatableClusterResources(requested.minResources(), clusterSpec.type(), clusterSpec.isExclusive(), nodeRepository) : new AllocatableClusterResources(nodes, nodeRepository); return within(Limits.of(requested), clusterSpec.isExclusive(), currentResources, firstDeployment); } /** Make the minimal adjustments needed to the current resources to stay within the limits */ private ClusterResources within(Limits limits, boolean exclusive, AllocatableClusterResources current, boolean firstDeployment) { if (limits.min().equals(limits.max())) return limits.min(); var currentAsAdvertised = current.toAdvertisedClusterResources(); if (! 
firstDeployment && currentAsAdvertised.isWithin(limits.min(), limits.max())) return currentAsAdvertised; return allocationOptimizer.findBestAllocation(ResourceTarget.preserve(current), current, limits, exclusive) .orElseThrow(() -> new IllegalArgumentException("No allocation possible within " + limits)) .toAdvertisedClusterResources(); } private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) { if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes) logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster + ", downscaling to " + actualNodes + " nodes in " + zone.environment()); } private boolean hasQuota(ApplicationId application, int requestedNodes) { if ( ! this.zone.system().isPublic()) return true; if (application.tenant().value().hashCode() == 3857) return requestedNodes <= 60; if (application.tenant().value().hashCode() == -1271827001) return requestedNodes <= 75; return requestedNodes <= tenantNodeQuota.with(FetchVector.Dimension.APPLICATION_ID, application.tenant().value()).value(); } private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) { nodes.sort(Comparator.comparingInt(node -> node.allocation().get().membership().index())); List<HostSpec> hosts = new ArrayList<>(nodes.size()); for (Node node : nodes) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " - " + node.flavor()); Allocation nodeAllocation = node.allocation().orElseThrow(IllegalStateException::new); hosts.add(new HostSpec(node.hostname(), nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository), node.flavor().resources(), requestedResources, nodeAllocation.membership(), node.status().vespaVersion(), nodeAllocation.networkPorts(), node.status().dockerImage())); if (nodeAllocation.networkPorts().isPresent()) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " has port allocations"); } } return hosts; } private void 
validate(Collection<HostSpec> hosts) { for (HostSpec host : hosts) { if (host.membership().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host); if (host.membership().get().cluster().group().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host); } } }
None of these are called from the config-model that I can tell. It uses `HostProvisioner` instead, which only has `prepare`.
public void restart(ApplicationId application, HostFilter filter) { nodeRepository.restart(ApplicationFilter.from(application, NodeHostFilter.from(filter))); }
}
public void restart(ApplicationId application, HostFilter filter) { nodeRepository.restart(ApplicationFilter.from(application, NodeHostFilter.from(filter))); }
class NodeRepositoryProvisioner implements Provisioner { private static final Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName()); private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; private final CapacityPolicies capacityPolicies; private final Zone zone; private final Preparer preparer; private final Activator activator; private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; private final NodeResourceLimits nodeResourceLimits; private final IntFlag tenantNodeQuota; @Inject public NodeRepositoryProvisioner(NodeRepository nodeRepository, Zone zone, ProvisionServiceProvider provisionServiceProvider, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); this.capacityPolicies = new CapacityPolicies(nodeRepository); this.zone = zone; this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService(nodeRepository) .map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService, flagSource)); this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.preparer = new Preparer(nodeRepository, flagSource, provisionServiceProvider.getHostProvisioner(), loadBalancerProvisioner); this.activator = new Activator(nodeRepository, loadBalancerProvisioner); this.tenantNodeQuota = Flags.TENANT_NODE_QUOTA.bindTo(flagSource); } /** * Returns a list of nodes in the prepared or active state, matching the given constraints. * The nodes are ordered by increasing index number. */ @Override public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) { log.log(Level.FINE, () -> "Received deploy prepare request for " + requested + " for application " + application + ", cluster " + cluster); if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group"); if ( ! 
hasQuota(application, requested.maxResources().nodes())) throw new IllegalArgumentException(requested + " requested for " + cluster + ". Max value exceeds your quota. Resolve this at https: nodeResourceLimits.ensureWithinAdvertisedLimits("Min", requested.minResources().nodeResources(), cluster); nodeResourceLimits.ensureWithinAdvertisedLimits("Max", requested.maxResources().nodeResources(), cluster); int groups; NodeResources resources; NodeSpec nodeSpec; if ( requested.type() == NodeType.tenant) { ClusterResources target = decideTargetResources(application, cluster, requested); int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application); groups = Math.min(target.groups(), nodeCount); resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster); boolean exclusive = capacityPolicies.decideExclusivity(cluster.isExclusive()); nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail()); logIfDownscaled(target.nodes(), nodeCount, cluster, logger); } else { groups = 1; resources = requested.minResources().nodeResources(); nodeSpec = NodeSpec.from(requested.type()); } return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources); } @Override public void activate(Collection<HostSpec> hosts, ActivationContext context, ApplicationTransaction transaction) { validate(hosts); activator.activate(hosts, context.generation(), transaction); } @Override @Override public void remove(ApplicationTransaction transaction) { nodeRepository.deactivate(transaction); loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.deactivate(transaction)); } @Override public ProvisionLock lock(ApplicationId application) { return new ProvisionLock(application, nodeRepository.lock(application)); } /** * Returns the target cluster resources, a value between the min and max in the requested capacity, * and updates the application store with the received min and max. 
*/ private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { try (Mutex lock = nodeRepository.lock(applicationId)) { Application application = nodeRepository.applications().get(applicationId).orElse(new Application(applicationId)); application = application.withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested.minResources(), requested.maxResources()); nodeRepository.applications().put(application, lock); return application.clusters().get(clusterSpec.id()).targetResources() .orElseGet(() -> currentResources(applicationId, clusterSpec, requested)); } } /** Returns the current resources of this cluster, or the closes */ private ClusterResources currentResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { List<Node> nodes = NodeList.copyOf(nodeRepository.getNodes(applicationId, Node.State.active)) .cluster(clusterSpec.id()) .not().retired() .not().removable() .asList(); boolean firstDeployment = nodes.isEmpty(); AllocatableClusterResources currentResources = firstDeployment ? new AllocatableClusterResources(requested.minResources(), clusterSpec.type(), clusterSpec.isExclusive(), nodeRepository) : new AllocatableClusterResources(nodes, nodeRepository); return within(Limits.of(requested), clusterSpec.isExclusive(), currentResources, firstDeployment); } /** Make the minimal adjustments needed to the current resources to stay within the limits */ private ClusterResources within(Limits limits, boolean exclusive, AllocatableClusterResources current, boolean firstDeployment) { if (limits.min().equals(limits.max())) return limits.min(); var currentAsAdvertised = current.toAdvertisedClusterResources(); if (! 
firstDeployment && currentAsAdvertised.isWithin(limits.min(), limits.max())) return currentAsAdvertised; return allocationOptimizer.findBestAllocation(ResourceTarget.preserve(current), current, limits, exclusive) .orElseThrow(() -> new IllegalArgumentException("No allocation possible within " + limits)) .toAdvertisedClusterResources(); } private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) { if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes) logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster + ", downscaling to " + actualNodes + " nodes in " + zone.environment()); } private boolean hasQuota(ApplicationId application, int requestedNodes) { if ( ! this.zone.system().isPublic()) return true; if (application.tenant().value().hashCode() == 3857) return requestedNodes <= 60; if (application.tenant().value().hashCode() == -1271827001) return requestedNodes <= 75; return requestedNodes <= tenantNodeQuota.with(FetchVector.Dimension.APPLICATION_ID, application.tenant().value()).value(); } private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) { nodes.sort(Comparator.comparingInt(node -> node.allocation().get().membership().index())); List<HostSpec> hosts = new ArrayList<>(nodes.size()); for (Node node : nodes) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " - " + node.flavor()); Allocation nodeAllocation = node.allocation().orElseThrow(IllegalStateException::new); hosts.add(new HostSpec(node.hostname(), nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository), node.flavor().resources(), requestedResources, nodeAllocation.membership(), node.status().vespaVersion(), nodeAllocation.networkPorts(), node.status().dockerImage())); if (nodeAllocation.networkPorts().isPresent()) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " has port allocations"); } } return hosts; } private void 
validate(Collection<HostSpec> hosts) { for (HostSpec host : hosts) { if (host.membership().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host); if (host.membership().get().cluster().group().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host); } } }
class NodeRepositoryProvisioner implements Provisioner { private static final Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName()); private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; private final CapacityPolicies capacityPolicies; private final Zone zone; private final Preparer preparer; private final Activator activator; private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; private final NodeResourceLimits nodeResourceLimits; private final IntFlag tenantNodeQuota; @Inject public NodeRepositoryProvisioner(NodeRepository nodeRepository, Zone zone, ProvisionServiceProvider provisionServiceProvider, FlagSource flagSource) { this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); this.capacityPolicies = new CapacityPolicies(nodeRepository); this.zone = zone; this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService(nodeRepository) .map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService, flagSource)); this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.preparer = new Preparer(nodeRepository, flagSource, provisionServiceProvider.getHostProvisioner(), loadBalancerProvisioner); this.activator = new Activator(nodeRepository, loadBalancerProvisioner); this.tenantNodeQuota = Flags.TENANT_NODE_QUOTA.bindTo(flagSource); } /** * Returns a list of nodes in the prepared or active state, matching the given constraints. * The nodes are ordered by increasing index number. */ @Override public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) { log.log(Level.FINE, () -> "Received deploy prepare request for " + requested + " for application " + application + ", cluster " + cluster); if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group"); if ( ! 
hasQuota(application, requested.maxResources().nodes())) throw new IllegalArgumentException(requested + " requested for " + cluster + ". Max value exceeds your quota. Resolve this at https: nodeResourceLimits.ensureWithinAdvertisedLimits("Min", requested.minResources().nodeResources(), cluster); nodeResourceLimits.ensureWithinAdvertisedLimits("Max", requested.maxResources().nodeResources(), cluster); int groups; NodeResources resources; NodeSpec nodeSpec; if ( requested.type() == NodeType.tenant) { ClusterResources target = decideTargetResources(application, cluster, requested); int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application); groups = Math.min(target.groups(), nodeCount); resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster); boolean exclusive = capacityPolicies.decideExclusivity(cluster.isExclusive()); nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail()); logIfDownscaled(target.nodes(), nodeCount, cluster, logger); } else { groups = 1; resources = requested.minResources().nodeResources(); nodeSpec = NodeSpec.from(requested.type()); } return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources); } @Override public void activate(Collection<HostSpec> hosts, ActivationContext context, ApplicationTransaction transaction) { validate(hosts); activator.activate(hosts, context.generation(), transaction); } @Override @Override public void remove(ApplicationTransaction transaction) { nodeRepository.deactivate(transaction); loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.deactivate(transaction)); } @Override public ProvisionLock lock(ApplicationId application) { return new ProvisionLock(application, nodeRepository.lock(application)); } /** * Returns the target cluster resources, a value between the min and max in the requested capacity, * and updates the application store with the received min and max. 
*/ private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { try (Mutex lock = nodeRepository.lock(applicationId)) { Application application = nodeRepository.applications().get(applicationId).orElse(new Application(applicationId)); application = application.withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested.minResources(), requested.maxResources()); nodeRepository.applications().put(application, lock); return application.clusters().get(clusterSpec.id()).targetResources() .orElseGet(() -> currentResources(applicationId, clusterSpec, requested)); } } /** Returns the current resources of this cluster, or the closes */ private ClusterResources currentResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { List<Node> nodes = NodeList.copyOf(nodeRepository.getNodes(applicationId, Node.State.active)) .cluster(clusterSpec.id()) .not().retired() .not().removable() .asList(); boolean firstDeployment = nodes.isEmpty(); AllocatableClusterResources currentResources = firstDeployment ? new AllocatableClusterResources(requested.minResources(), clusterSpec.type(), clusterSpec.isExclusive(), nodeRepository) : new AllocatableClusterResources(nodes, nodeRepository); return within(Limits.of(requested), clusterSpec.isExclusive(), currentResources, firstDeployment); } /** Make the minimal adjustments needed to the current resources to stay within the limits */ private ClusterResources within(Limits limits, boolean exclusive, AllocatableClusterResources current, boolean firstDeployment) { if (limits.min().equals(limits.max())) return limits.min(); var currentAsAdvertised = current.toAdvertisedClusterResources(); if (! 
firstDeployment && currentAsAdvertised.isWithin(limits.min(), limits.max())) return currentAsAdvertised; return allocationOptimizer.findBestAllocation(ResourceTarget.preserve(current), current, limits, exclusive) .orElseThrow(() -> new IllegalArgumentException("No allocation possible within " + limits)) .toAdvertisedClusterResources(); } private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) { if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes) logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster + ", downscaling to " + actualNodes + " nodes in " + zone.environment()); } private boolean hasQuota(ApplicationId application, int requestedNodes) { if ( ! this.zone.system().isPublic()) return true; if (application.tenant().value().hashCode() == 3857) return requestedNodes <= 60; if (application.tenant().value().hashCode() == -1271827001) return requestedNodes <= 75; return requestedNodes <= tenantNodeQuota.with(FetchVector.Dimension.APPLICATION_ID, application.tenant().value()).value(); } private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) { nodes.sort(Comparator.comparingInt(node -> node.allocation().get().membership().index())); List<HostSpec> hosts = new ArrayList<>(nodes.size()); for (Node node : nodes) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " - " + node.flavor()); Allocation nodeAllocation = node.allocation().orElseThrow(IllegalStateException::new); hosts.add(new HostSpec(node.hostname(), nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository), node.flavor().resources(), requestedResources, nodeAllocation.membership(), node.status().vespaVersion(), nodeAllocation.networkPorts(), node.status().dockerImage())); if (nodeAllocation.networkPorts().isPresent()) { log.log(Level.FINE, () -> "Prepared node " + node.hostname() + " has port allocations"); } } return hosts; } private void 
validate(Collection<HostSpec> hosts) { for (HostSpec host : hosts) { if (host.membership().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host); if (host.membership().get().cluster().group().isEmpty()) throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host); } } }
If this implements equals/hashCode you can simply return `this.equals(EMPTY)`. Same for the other classes. This will also simplify serialization testing.
public boolean isEmpty() { return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty() && address.isEmpty() && billingContact.isEmpty(); }
return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty()
public boolean isEmpty() { return this.equals(EMPTY); }
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static TenantInfo EmptyInfo = new TenantInfo("","","", "", "", "", TenantInfoAddress.EmptyAddress, TenantInfoBillingContact.EmptyBillingContact); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo 
withContactName(String newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } }
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static final TenantInfo EMPTY = new TenantInfo("","","", "", "", "", TenantInfoAddress.EMPTY, TenantInfoBillingContact.EMPTY); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactName(String 
newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TenantInfo that = (TenantInfo) o; return name.equals(that.name) && email.equals(that.email) && website.equals(that.website) && contactName.equals(that.contactName) && contactEmail.equals(that.contactEmail) && invoiceEmail.equals(that.invoiceEmail) && address.equals(that.address) && billingContact.equals(that.billingContact); } @Override public int hashCode() { return Objects.hash(name, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } }
This can be removed now since this is 1:1 with presence of `HostProvisioner` and this maintainer is only created if it is set in `NodeRepositoryMaintenance`
private List<Node> provision(List<NodeResources> advertisedSpareCapacity, NodeList nodes) { if (!nodeRepository().zone().getCloud().dynamicProvisioning()) { return List.of(); } Map<String, Node> hostsByHostname = new HashMap<>(nodes.hosts().asList().stream() .filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision()) .collect(Collectors.toMap(Node::hostname, Function.identity()))); nodes.asList().stream() .filter(node -> node.allocation().isPresent()) .flatMap(node -> node.parentHostname().stream()) .distinct() .forEach(hostsByHostname::remove); List<Node> excessHosts = new ArrayList<>(hostsByHostname.values()); var capacity = new ArrayList<>(advertisedSpareCapacity); for (Iterator<NodeResources> it = capacity.iterator(); it.hasNext() && !excessHosts.isEmpty(); ) { NodeResources resources = it.next(); excessHosts.stream() .filter(nodeRepository()::canAllocateTenantNodeTo) .filter(host -> nodeRepository().resourcesCalculator() .advertisedResourcesOf(host.flavor()) .satisfies(resources)) .min(Comparator.comparingInt(n -> n.flavor().cost())) .ifPresent(host -> { excessHosts.remove(host); it.remove(); }); } capacity.forEach(resources -> { try { Version osVersion = nodeRepository().osVersions().targetFor(NodeType.host).orElse(Version.emptyVersion); List<Node> hosts = hostProvisioner.provisionHosts(nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId, osVersion, false) .stream() .map(ProvisionedHost::generateHost) .collect(Collectors.toList()); nodeRepository().addNodes(hosts, Agent.DynamicProvisioningMaintainer); } catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) { log.log(Level.WARNING, "Failed to pre-provision " + resources + ": " + e.getMessage()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e); } }); return excessHosts; }
if (!nodeRepository().zone().getCloud().dynamicProvisioning()) {
private List<Node> provision(List<NodeResources> advertisedSpareCapacity, NodeList nodes) { Map<String, Node> hostsByHostname = new HashMap<>(nodes.hosts().asList().stream() .filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision()) .collect(Collectors.toMap(Node::hostname, Function.identity()))); nodes.asList().stream() .filter(node -> node.allocation().isPresent()) .flatMap(node -> node.parentHostname().stream()) .distinct() .forEach(hostsByHostname::remove); List<Node> excessHosts = new ArrayList<>(hostsByHostname.values()); var capacity = new ArrayList<>(advertisedSpareCapacity); for (Iterator<NodeResources> it = capacity.iterator(); it.hasNext() && !excessHosts.isEmpty(); ) { NodeResources resources = it.next(); excessHosts.stream() .filter(nodeRepository()::canAllocateTenantNodeTo) .filter(host -> nodeRepository().resourcesCalculator() .advertisedResourcesOf(host.flavor()) .satisfies(resources)) .min(Comparator.comparingInt(n -> n.flavor().cost())) .ifPresent(host -> { excessHosts.remove(host); it.remove(); }); } capacity.forEach(resources -> { try { Version osVersion = nodeRepository().osVersions().targetFor(NodeType.host).orElse(Version.emptyVersion); List<Node> hosts = hostProvisioner.provisionHosts(nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId, osVersion, false) .stream() .map(ProvisionedHost::generateHost) .collect(Collectors.toList()); nodeRepository().addNodes(hosts, Agent.DynamicProvisioningMaintainer); } catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) { log.log(Level.WARNING, "Failed to pre-provision " + resources + ": " + e.getMessage()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e); } }); return excessHosts; }
class DynamicProvisioningMaintainer extends NodeRepositoryMaintainer { private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName()); private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision"); private final HostProvisioner hostProvisioner; private final ListFlag<HostCapacity> targetCapacityFlag; DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval, HostProvisioner hostProvisioner, FlagSource flagSource, Metric metric) { super(nodeRepository, interval, metric); this.hostProvisioner = hostProvisioner; this.targetCapacityFlag = Flags.TARGET_CAPACITY.bindTo(flagSource); } @Override protected boolean maintain() { try (Mutex lock = nodeRepository().lockUnallocated()) { NodeList nodes = nodeRepository().list(); resumeProvisioning(nodes, lock); convergeToCapacity(nodes); } return true; } /** Resume provisioning of already provisioned hosts and their children */ private void resumeProvisioning(NodeList nodes, Mutex lock) { Map<String, Set<Node>> nodesByProvisionedParentHostname = nodes.nodeType(NodeType.tenant).asList().stream() .filter(node -> node.parentHostname().isPresent()) .collect(Collectors.groupingBy( node -> node.parentHostname().get(), Collectors.toSet())); nodes.state(Node.State.provisioned).hosts().forEach(host -> { Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of()); try { List<Node> updatedNodes = hostProvisioner.provision(host, children); verifyDns(updatedNodes); nodeRepository().write(updatedNodes, lock); } catch (IllegalArgumentException | IllegalStateException e) { log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " + Exceptions.toMessageString(e)); } catch (FatalProvisioningException e) { log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() + " children, failing out the host recursively", e); 
nodeRepository().failRecursively( host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure"); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e); } }); } /** Converge zone to wanted capacity */ private void convergeToCapacity(NodeList nodes) { List<NodeResources> capacity = targetCapacity(); List<Node> excessHosts = provision(capacity, nodes); excessHosts.forEach(host -> { try { hostProvisioner.deprovision(host); nodeRepository().removeRecursively(host, true); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e); } }); } /** * Provision hosts to ensure there is room to allocate spare nodes. * * @param advertisedSpareCapacity the advertised resources of the spare nodes * @param nodes list of all nodes * @return excess hosts that can safely be deprovisioned: An excess host 1. contains no nodes allocated * to an application, and assuming the spare nodes have been allocated, and 2. is not parked * without wantToDeprovision (which means an operator is looking at the node). */ /** Reads node resources declared by target capacity flag */ private List<NodeResources> targetCapacity() { return targetCapacityFlag.value().stream() .flatMap(cap -> { NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1); return IntStream.range(0, cap.getCount()).mapToObj(i -> resources); }) .sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed()) .collect(Collectors.toList()); } /** Verify DNS configuration of given nodes */ private void verifyDns(List<Node> nodes) { for (var node : nodes) { for (var ipAddress : node.ipConfig().primary()) { IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver()); } } } }
class DynamicProvisioningMaintainer extends NodeRepositoryMaintainer { private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName()); private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision"); private final HostProvisioner hostProvisioner; private final ListFlag<HostCapacity> targetCapacityFlag; DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval, HostProvisioner hostProvisioner, FlagSource flagSource, Metric metric) { super(nodeRepository, interval, metric); this.hostProvisioner = hostProvisioner; this.targetCapacityFlag = Flags.TARGET_CAPACITY.bindTo(flagSource); } @Override protected boolean maintain() { try (Mutex lock = nodeRepository().lockUnallocated()) { NodeList nodes = nodeRepository().list(); resumeProvisioning(nodes, lock); convergeToCapacity(nodes); } return true; } /** Resume provisioning of already provisioned hosts and their children */ private void resumeProvisioning(NodeList nodes, Mutex lock) { Map<String, Set<Node>> nodesByProvisionedParentHostname = nodes.nodeType(NodeType.tenant).asList().stream() .filter(node -> node.parentHostname().isPresent()) .collect(Collectors.groupingBy( node -> node.parentHostname().get(), Collectors.toSet())); nodes.state(Node.State.provisioned).hosts().forEach(host -> { Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of()); try { List<Node> updatedNodes = hostProvisioner.provision(host, children); verifyDns(updatedNodes); nodeRepository().write(updatedNodes, lock); } catch (IllegalArgumentException | IllegalStateException e) { log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " + Exceptions.toMessageString(e)); } catch (FatalProvisioningException e) { log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() + " children, failing out the host recursively", e); 
nodeRepository().failRecursively( host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure"); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e); } }); } /** Converge zone to wanted capacity */ private void convergeToCapacity(NodeList nodes) { List<NodeResources> capacity = targetCapacity(); List<Node> excessHosts = provision(capacity, nodes); excessHosts.forEach(host -> { try { hostProvisioner.deprovision(host); nodeRepository().removeRecursively(host, true); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e); } }); } /** * Provision hosts to ensure there is room to allocate spare nodes. * * @param advertisedSpareCapacity the advertised resources of the spare nodes * @param nodes list of all nodes * @return excess hosts that can safely be deprovisioned: An excess host 1. contains no nodes allocated * to an application, and assuming the spare nodes have been allocated, and 2. is not parked * without wantToDeprovision (which means an operator is looking at the node). */ /** Reads node resources declared by target capacity flag */ private List<NodeResources> targetCapacity() { return targetCapacityFlag.value().stream() .flatMap(cap -> { NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1); return IntStream.range(0, cap.getCount()).mapToObj(i -> resources); }) .sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed()) .collect(Collectors.toList()); } /** Verify DNS configuration of given nodes */ private void verifyDns(List<Node> nodes) { for (var node : nodes) { for (var ipAddress : node.ipConfig().primary()) { IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver()); } } } }
nice
private List<Node> provision(List<NodeResources> advertisedSpareCapacity, NodeList nodes) { if (!nodeRepository().zone().getCloud().dynamicProvisioning()) { return List.of(); } Map<String, Node> hostsByHostname = new HashMap<>(nodes.hosts().asList().stream() .filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision()) .collect(Collectors.toMap(Node::hostname, Function.identity()))); nodes.asList().stream() .filter(node -> node.allocation().isPresent()) .flatMap(node -> node.parentHostname().stream()) .distinct() .forEach(hostsByHostname::remove); List<Node> excessHosts = new ArrayList<>(hostsByHostname.values()); var capacity = new ArrayList<>(advertisedSpareCapacity); for (Iterator<NodeResources> it = capacity.iterator(); it.hasNext() && !excessHosts.isEmpty(); ) { NodeResources resources = it.next(); excessHosts.stream() .filter(nodeRepository()::canAllocateTenantNodeTo) .filter(host -> nodeRepository().resourcesCalculator() .advertisedResourcesOf(host.flavor()) .satisfies(resources)) .min(Comparator.comparingInt(n -> n.flavor().cost())) .ifPresent(host -> { excessHosts.remove(host); it.remove(); }); } capacity.forEach(resources -> { try { Version osVersion = nodeRepository().osVersions().targetFor(NodeType.host).orElse(Version.emptyVersion); List<Node> hosts = hostProvisioner.provisionHosts(nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId, osVersion, false) .stream() .map(ProvisionedHost::generateHost) .collect(Collectors.toList()); nodeRepository().addNodes(hosts, Agent.DynamicProvisioningMaintainer); } catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) { log.log(Level.WARNING, "Failed to pre-provision " + resources + ": " + e.getMessage()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e); } }); return excessHosts; }
if (!nodeRepository().zone().getCloud().dynamicProvisioning()) {
private List<Node> provision(List<NodeResources> advertisedSpareCapacity, NodeList nodes) { Map<String, Node> hostsByHostname = new HashMap<>(nodes.hosts().asList().stream() .filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision()) .collect(Collectors.toMap(Node::hostname, Function.identity()))); nodes.asList().stream() .filter(node -> node.allocation().isPresent()) .flatMap(node -> node.parentHostname().stream()) .distinct() .forEach(hostsByHostname::remove); List<Node> excessHosts = new ArrayList<>(hostsByHostname.values()); var capacity = new ArrayList<>(advertisedSpareCapacity); for (Iterator<NodeResources> it = capacity.iterator(); it.hasNext() && !excessHosts.isEmpty(); ) { NodeResources resources = it.next(); excessHosts.stream() .filter(nodeRepository()::canAllocateTenantNodeTo) .filter(host -> nodeRepository().resourcesCalculator() .advertisedResourcesOf(host.flavor()) .satisfies(resources)) .min(Comparator.comparingInt(n -> n.flavor().cost())) .ifPresent(host -> { excessHosts.remove(host); it.remove(); }); } capacity.forEach(resources -> { try { Version osVersion = nodeRepository().osVersions().targetFor(NodeType.host).orElse(Version.emptyVersion); List<Node> hosts = hostProvisioner.provisionHosts(nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId, osVersion, false) .stream() .map(ProvisionedHost::generateHost) .collect(Collectors.toList()); nodeRepository().addNodes(hosts, Agent.DynamicProvisioningMaintainer); } catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) { log.log(Level.WARNING, "Failed to pre-provision " + resources + ": " + e.getMessage()); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e); } }); return excessHosts; }
class DynamicProvisioningMaintainer extends NodeRepositoryMaintainer { private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName()); private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision"); private final HostProvisioner hostProvisioner; private final ListFlag<HostCapacity> targetCapacityFlag; DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval, HostProvisioner hostProvisioner, FlagSource flagSource, Metric metric) { super(nodeRepository, interval, metric); this.hostProvisioner = hostProvisioner; this.targetCapacityFlag = Flags.TARGET_CAPACITY.bindTo(flagSource); } @Override protected boolean maintain() { try (Mutex lock = nodeRepository().lockUnallocated()) { NodeList nodes = nodeRepository().list(); resumeProvisioning(nodes, lock); convergeToCapacity(nodes); } return true; } /** Resume provisioning of already provisioned hosts and their children */ private void resumeProvisioning(NodeList nodes, Mutex lock) { Map<String, Set<Node>> nodesByProvisionedParentHostname = nodes.nodeType(NodeType.tenant).asList().stream() .filter(node -> node.parentHostname().isPresent()) .collect(Collectors.groupingBy( node -> node.parentHostname().get(), Collectors.toSet())); nodes.state(Node.State.provisioned).hosts().forEach(host -> { Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of()); try { List<Node> updatedNodes = hostProvisioner.provision(host, children); verifyDns(updatedNodes); nodeRepository().write(updatedNodes, lock); } catch (IllegalArgumentException | IllegalStateException e) { log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " + Exceptions.toMessageString(e)); } catch (FatalProvisioningException e) { log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() + " children, failing out the host recursively", e); 
nodeRepository().failRecursively( host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure"); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e); } }); } /** Converge zone to wanted capacity */ private void convergeToCapacity(NodeList nodes) { List<NodeResources> capacity = targetCapacity(); List<Node> excessHosts = provision(capacity, nodes); excessHosts.forEach(host -> { try { hostProvisioner.deprovision(host); nodeRepository().removeRecursively(host, true); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e); } }); } /** * Provision hosts to ensure there is room to allocate spare nodes. * * @param advertisedSpareCapacity the advertised resources of the spare nodes * @param nodes list of all nodes * @return excess hosts that can safely be deprovisioned: An excess host 1. contains no nodes allocated * to an application, and assuming the spare nodes have been allocated, and 2. is not parked * without wantToDeprovision (which means an operator is looking at the node). */ /** Reads node resources declared by target capacity flag */ private List<NodeResources> targetCapacity() { return targetCapacityFlag.value().stream() .flatMap(cap -> { NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1); return IntStream.range(0, cap.getCount()).mapToObj(i -> resources); }) .sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed()) .collect(Collectors.toList()); } /** Verify DNS configuration of given nodes */ private void verifyDns(List<Node> nodes) { for (var node : nodes) { for (var ipAddress : node.ipConfig().primary()) { IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver()); } } } }
class DynamicProvisioningMaintainer extends NodeRepositoryMaintainer { private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName()); private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision"); private final HostProvisioner hostProvisioner; private final ListFlag<HostCapacity> targetCapacityFlag; DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval, HostProvisioner hostProvisioner, FlagSource flagSource, Metric metric) { super(nodeRepository, interval, metric); this.hostProvisioner = hostProvisioner; this.targetCapacityFlag = Flags.TARGET_CAPACITY.bindTo(flagSource); } @Override protected boolean maintain() { try (Mutex lock = nodeRepository().lockUnallocated()) { NodeList nodes = nodeRepository().list(); resumeProvisioning(nodes, lock); convergeToCapacity(nodes); } return true; } /** Resume provisioning of already provisioned hosts and their children */ private void resumeProvisioning(NodeList nodes, Mutex lock) { Map<String, Set<Node>> nodesByProvisionedParentHostname = nodes.nodeType(NodeType.tenant).asList().stream() .filter(node -> node.parentHostname().isPresent()) .collect(Collectors.groupingBy( node -> node.parentHostname().get(), Collectors.toSet())); nodes.state(Node.State.provisioned).hosts().forEach(host -> { Set<Node> children = nodesByProvisionedParentHostname.getOrDefault(host.hostname(), Set.of()); try { List<Node> updatedNodes = hostProvisioner.provision(host, children); verifyDns(updatedNodes); nodeRepository().write(updatedNodes, lock); } catch (IllegalArgumentException | IllegalStateException e) { log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " + Exceptions.toMessageString(e)); } catch (FatalProvisioningException e) { log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() + " children, failing out the host recursively", e); 
nodeRepository().failRecursively( host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure"); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e); } }); } /** Converge zone to wanted capacity */ private void convergeToCapacity(NodeList nodes) { List<NodeResources> capacity = targetCapacity(); List<Node> excessHosts = provision(capacity, nodes); excessHosts.forEach(host -> { try { hostProvisioner.deprovision(host); nodeRepository().removeRecursively(host, true); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e); } }); } /** * Provision hosts to ensure there is room to allocate spare nodes. * * @param advertisedSpareCapacity the advertised resources of the spare nodes * @param nodes list of all nodes * @return excess hosts that can safely be deprovisioned: An excess host 1. contains no nodes allocated * to an application, and assuming the spare nodes have been allocated, and 2. is not parked * without wantToDeprovision (which means an operator is looking at the node). */ /** Reads node resources declared by target capacity flag */ private List<NodeResources> targetCapacity() { return targetCapacityFlag.value().stream() .flatMap(cap -> { NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1); return IntStream.range(0, cap.getCount()).mapToObj(i -> resources); }) .sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed()) .collect(Collectors.toList()); } /** Verify DNS configuration of given nodes */ private void verifyDns(List<Node> nodes) { for (var node : nodes) { for (var ipAddress : node.ipConfig().primary()) { IP.verifyDns(node.hostname(), ipAddress, nodeRepository().nameResolver()); } } } }
Include repo in the tag value as well?
private void sampleDuration() { Gauge gauge = metrics.declareGauge("docker.imagePullDurationSecs", new Dimensions(Map.of("tag", dockerImage.tagAsVersion().toFullString()))); Duration pullDuration = Duration.between(startedAt, clock.instant()); gauge.sample(pullDuration.getSeconds()); }
new Dimensions(Map.of("tag", dockerImage.tagAsVersion().toFullString())));
private void sampleDuration() { Gauge gauge = metrics.declareGauge("docker.imagePullDurationSecs", new Dimensions(Map.of("image", dockerImage.asString()))); Duration pullDuration = Duration.between(startedAt, clock.instant()); gauge.sample(pullDuration.getSeconds()); }
class ImagePullCallback extends PullImageResultCallback { private final DockerImage dockerImage; private final Instant startedAt; private ImagePullCallback(DockerImage dockerImage) { this.dockerImage = dockerImage; this.startedAt = clock.instant(); } @Override public void onError(Throwable throwable) { removeScheduledPoll(dockerImage); logger.log(Level.SEVERE, "Could not download image " + dockerImage.asString(), throwable); } @Override public void onComplete() { if (imageIsDownloaded(dockerImage)) { logger.log(Level.INFO, "Download completed: " + dockerImage.asString()); removeScheduledPoll(dockerImage); } else { numberOfDockerApiFails.increment(); throw new DockerClientException("Could not download image: " + dockerImage); } sampleDuration(); } }
class ImagePullCallback extends PullImageResultCallback { private final DockerImage dockerImage; private final Instant startedAt; private ImagePullCallback(DockerImage dockerImage) { this.dockerImage = dockerImage; this.startedAt = clock.instant(); } @Override public void onError(Throwable throwable) { removeScheduledPoll(dockerImage); logger.log(Level.SEVERE, "Could not download image " + dockerImage.asString(), throwable); } @Override public void onComplete() { if (imageIsDownloaded(dockerImage)) { logger.log(Level.INFO, "Download completed: " + dockerImage.asString()); removeScheduledPoll(dockerImage); } else { numberOfDockerApiFails.increment(); throw new DockerClientException("Could not download image: " + dockerImage); } sampleDuration(); } }
Consider adding a unit test for these scenarios.
public static QuotaUsage calculateQuotaUsage(com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application) { var quotaUsageRate = application.clusters().values().stream() .map(cluster -> largestQuotaUsage(cluster.current(), cluster.max())) .mapToDouble(resources -> resources.nodes() * resources.nodeResources().cost()) .sum(); return QuotaUsage.create(quotaUsageRate); }
public static QuotaUsage calculateQuotaUsage(com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application) { var quotaUsageRate = application.clusters().values().stream() .map(cluster -> largestQuotaUsage(cluster.current(), cluster.max())) .mapToDouble(resources -> resources.nodes() * resources.nodeResources().cost()) .sum(); return QuotaUsage.create(quotaUsageRate); }
class DeploymentQuotaCalculator { public static Quota calculate(Quota tenantQuota, List<Application> tenantApps, ApplicationId deployingApp, ZoneId deployingZone, DeploymentSpec deploymentSpec) { if (tenantQuota.budget().isEmpty()) return tenantQuota; if (deployingZone.environment().isProduction()) return probablyEnoughForAll(tenantQuota, tenantApps, deployingApp, deploymentSpec); return getMaximumAllowedQuota(tenantQuota, tenantApps, deployingApp, deployingZone); } private static ClusterResources largestQuotaUsage(ClusterResources a, ClusterResources b) { var usageA = a.nodes() * a.nodeResources().cost(); var usageB = b.nodes() * b.nodeResources().cost(); return usageA < usageB ? b : a; } /** Just get the maximum quota we are allowed to use. */ private static Quota getMaximumAllowedQuota(Quota tenantQuota, List<Application> applications, ApplicationId application, ZoneId zone) { var usageOutsideDeployment = applications.stream() .map(app -> app.quotaUsage(application, zone)) .reduce(QuotaUsage::add).orElse(QuotaUsage.none); return tenantQuota.subtractUsage(usageOutsideDeployment.rate()); } /** * We want to avoid applying a resource change to an instance in production when it seems likely * that there will not be enough quota to apply this change to _all_ production instances. * <p> * To achieve this, we must make the assumption that all production instances will use * the same amount of resources, and so equally divide the quota among them. 
*/ private static Quota probablyEnoughForAll(Quota tenantQuota, List<Application> tenantApps, ApplicationId application, DeploymentSpec deploymentSpec) { TenantAndApplicationId deployingApp = TenantAndApplicationId.from(application); var usageOutsideApplication = tenantApps.stream() .filter(app -> !app.id().equals(deployingApp)) .map(Application::quotaUsage).reduce(QuotaUsage::add).orElse(QuotaUsage.none); long productionDeployments = Math.max(1, deploymentSpec.instances().stream() .flatMap(instance -> instance.zones().stream()) .filter(zone -> zone.environment().isProduction()) .count()); return tenantQuota.withBudget( tenantQuota.subtractUsage(usageOutsideApplication.rate()) .budget().get().divide(BigDecimal.valueOf(productionDeployments), 5, RoundingMode.HALF_UP)); } }
class DeploymentQuotaCalculator { public static Quota calculate(Quota tenantQuota, List<Application> tenantApps, ApplicationId deployingApp, ZoneId deployingZone, DeploymentSpec deploymentSpec) { if (tenantQuota.budget().isEmpty()) return tenantQuota; if (deployingZone.environment().isProduction()) return probablyEnoughForAll(tenantQuota, tenantApps, deployingApp, deploymentSpec); return getMaximumAllowedQuota(tenantQuota, tenantApps, deployingApp, deployingZone); } private static ClusterResources largestQuotaUsage(ClusterResources a, ClusterResources b) { var usageA = a.nodes() * a.nodeResources().cost(); var usageB = b.nodes() * b.nodeResources().cost(); return usageA < usageB ? b : a; } /** Just get the maximum quota we are allowed to use. */ private static Quota getMaximumAllowedQuota(Quota tenantQuota, List<Application> applications, ApplicationId application, ZoneId zone) { var usageOutsideDeployment = applications.stream() .map(app -> app.quotaUsage(application, zone)) .reduce(QuotaUsage::add).orElse(QuotaUsage.none); return tenantQuota.subtractUsage(usageOutsideDeployment.rate()); } /** * We want to avoid applying a resource change to an instance in production when it seems likely * that there will not be enough quota to apply this change to _all_ production instances. * <p> * To achieve this, we must make the assumption that all production instances will use * the same amount of resources, and so equally divide the quota among them. 
*/ private static Quota probablyEnoughForAll(Quota tenantQuota, List<Application> tenantApps, ApplicationId application, DeploymentSpec deploymentSpec) { TenantAndApplicationId deployingApp = TenantAndApplicationId.from(application); var usageOutsideApplication = tenantApps.stream() .filter(app -> !app.id().equals(deployingApp)) .map(Application::quotaUsage).reduce(QuotaUsage::add).orElse(QuotaUsage.none); long productionDeployments = Math.max(1, deploymentSpec.instances().stream() .flatMap(instance -> instance.zones().stream()) .filter(zone -> zone.environment().isProduction()) .count()); return tenantQuota.withBudget( tenantQuota.subtractUsage(usageOutsideApplication.rate()) .budget().get().divide(BigDecimal.valueOf(productionDeployments), 5, RoundingMode.HALF_UP)); } }
Assuming testing serialization isn't important here, I think it's better to construct `Application` directly.
public void using_highest_resource_use() throws IOException, URISyntaxException { var content = new String(Files.readAllBytes(Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/application/response/application.json"))); var mapper = new ObjectMapper(); var application = mapper.readValue(content, ApplicationData.class).toApplication(); var usage = DeploymentQuotaCalculator.calculateQuotaUsage(application); assertEquals(1.164, usage.rate(), 0.001); }
var application = mapper.readValue(content, ApplicationData.class).toApplication();
public void using_highest_resource_use() throws IOException, URISyntaxException { var content = new String(Files.readAllBytes(Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/application/response/application.json"))); var mapper = new ObjectMapper(); var application = mapper.readValue(content, ApplicationData.class).toApplication(); var usage = DeploymentQuotaCalculator.calculateQuotaUsage(application); assertEquals(1.164, usage.rate(), 0.001); }
class DeploymentQuotaCalculatorTest { @Test public void quota_is_divided_among_prod_instances() { Quota calculated = DeploymentQuotaCalculator.calculate(Quota.unlimited().withBudget(10), List.of(), ApplicationId.defaultId(), ZoneId.defaultId(), DeploymentSpec.fromXml( "<deployment version='1.0'>\n" + " <instance id='instance1'> \n" + " <test />\n" + " <staging />\n" + " <prod>\n" + " <region active=\"true\">us-east-1</region>\n" + " <region active=\"false\">us-west-1</region>\n" + " </prod>\n" + " </instance>\n" + " <instance id='instance2'>\n" + " <perf/>\n" + " <dev/>\n" + " <prod>\n" + " <region active=\"true\">us-north-1</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>")); assertEquals(10d/3, calculated.budget().get().doubleValue(), 1e-5); } @Test public void unlimited_quota_remains_unlimited() { Quota calculated = DeploymentQuotaCalculator.calculate(Quota.unlimited(), List.of(), ApplicationId.defaultId(), ZoneId.defaultId(), DeploymentSpec.empty); assertTrue(calculated.isUnlimited()); } @Test public void zero_quota_remains_zero() { Quota calculated = DeploymentQuotaCalculator.calculate(Quota.zero(), List.of(), ApplicationId.defaultId(), ZoneId.defaultId(), DeploymentSpec.empty); assertEquals(calculated.budget().get().doubleValue(), 0, 1e-5); } @Test }
class DeploymentQuotaCalculatorTest { @Test public void quota_is_divided_among_prod_instances() { Quota calculated = DeploymentQuotaCalculator.calculate(Quota.unlimited().withBudget(10), List.of(), ApplicationId.defaultId(), ZoneId.defaultId(), DeploymentSpec.fromXml( "<deployment version='1.0'>\n" + " <instance id='instance1'> \n" + " <test />\n" + " <staging />\n" + " <prod>\n" + " <region active=\"true\">us-east-1</region>\n" + " <region active=\"false\">us-west-1</region>\n" + " </prod>\n" + " </instance>\n" + " <instance id='instance2'>\n" + " <perf/>\n" + " <dev/>\n" + " <prod>\n" + " <region active=\"true\">us-north-1</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>")); assertEquals(10d/3, calculated.budget().get().doubleValue(), 1e-5); } @Test public void unlimited_quota_remains_unlimited() { Quota calculated = DeploymentQuotaCalculator.calculate(Quota.unlimited(), List.of(), ApplicationId.defaultId(), ZoneId.defaultId(), DeploymentSpec.empty); assertTrue(calculated.isUnlimited()); } @Test public void zero_quota_remains_zero() { Quota calculated = DeploymentQuotaCalculator.calculate(Quota.zero(), List.of(), ApplicationId.defaultId(), ZoneId.defaultId(), DeploymentSpec.empty); assertEquals(calculated.budget().get().doubleValue(), 0, 1e-5); } @Test }
Consider making an AutoCloseable class to replace these lines and the tenant.getApplicationRepo().lock().
// Activates the given session for the application, deactivating the currently active one.
// Lock ordering is deliberate: the host provisioner's application lock (when present) is
// taken BEFORE the tenant's application-repo session lock -- do not reorder.
public Activation activate(Session session, ApplicationId applicationId, Tenant tenant, boolean force) {
    NestedTransaction transaction = new NestedTransaction();
    // Empty when there is no host provisioner; otherwise holds the provisioner lock,
    // which is released in the finally block below.
    Optional<ApplicationTransaction> applicationTransaction = hostProvisioner.map(provisioner -> provisioner.lock(applicationId))
                                                                             .map(lock -> new ApplicationTransaction(lock, transaction));
    try (var sessionLock = tenant.getApplicationRepo().lock(applicationId)) {
        Session activeSession = getActiveSession(applicationId);
        CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter();
        transaction.add(deactivateCurrentActivateNew(activeSession, session, force));
        if (applicationTransaction.isPresent()) {
            // NOTE(review): presumably the provisioner commits the wrapped transaction as
            // part of activate() -- confirm; only the else-branch commits explicitly here.
            hostProvisioner.get().activate(session.getAllocatedHosts().getHosts(),
                                           new ActivationContext(session.getSessionId()),
                                           applicationTransaction.get());
        } else {
            transaction.commit();
        }
        return new Activation(waiter, activeSession);
    } finally {
        // Always release the provisioner lock, even when activation fails.
        applicationTransaction.ifPresent(ApplicationTransaction::close);
    }
}
Optional<ApplicationTransaction> applicationTransaction = hostProvisioner.map(provisioner -> provisioner.lock(applicationId))
// Activates the given session for the application, deactivating the currently active one.
// Lock ordering is deliberate: the host provisioner's application lock (when present) is
// taken BEFORE the tenant's application-repo session lock -- do not reorder.
public Activation activate(Session session, ApplicationId applicationId, Tenant tenant, boolean force) {
    NestedTransaction transaction = new NestedTransaction();
    // Empty when there is no host provisioner; otherwise holds the provisioner lock,
    // which is released in the finally block below.
    Optional<ApplicationTransaction> applicationTransaction = hostProvisioner.map(provisioner -> provisioner.lock(applicationId))
                                                                             .map(lock -> new ApplicationTransaction(lock, transaction));
    try (var sessionLock = tenant.getApplicationRepo().lock(applicationId)) {
        Session activeSession = getActiveSession(applicationId);
        CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter();
        transaction.add(deactivateCurrentActivateNew(activeSession, session, force));
        if (applicationTransaction.isPresent()) {
            // NOTE(review): presumably the provisioner commits the wrapped transaction as
            // part of activate() -- confirm; only the else-branch commits explicitly here.
            hostProvisioner.get().activate(session.getAllocatedHosts().getHosts(),
                                           new ActivationContext(session.getSessionId()),
                                           applicationTransaction.get());
        } else {
            transaction.commit();
        }
        return new Activation(waiter, activeSession);
    } finally {
        // Always release the provisioner lock, even when activation fails.
        applicationTransaction.ifPresent(ApplicationTransaction::close);
    }
}
// Builder for ApplicationRepository. Every collaborator not set explicitly falls back to
// the default initialized below. tenantRepository and orchestrator have no defaults and
// stay null unless set -- build() does not validate them (TODO confirm callers always set them).
class Builder {
    private TenantRepository tenantRepository;
    // null means "not set yet"; Optional.empty() means "explicitly no provisioner".
    private Optional<Provisioner> hostProvisioner;
    private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
    private Clock clock = Clock.systemUTC();
    private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
    private Orchestrator orchestrator;
    private LogRetriever logRetriever = new LogRetriever();
    private TesterClient testerClient = new TesterClient();
    private Metric metric = new NullMetric();
    private FlagSource flagSource = new InMemoryFlagSource();

    public Builder withTenantRepository(TenantRepository tenantRepository) {
        this.tenantRepository = tenantRepository;
        return this;
    }

    public Builder withClock(Clock clock) {
        this.clock = clock;
        return this;
    }

    // The provisioner may be set at most once, by this or by withHostProvisionerProvider.
    public Builder withProvisioner(Provisioner provisioner) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = Optional.ofNullable(provisioner);
        return this;
    }

    public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
        return this;
    }

    public Builder withHttpProxy(HttpProxy httpProxy) {
        this.httpProxy = httpProxy;
        return this;
    }

    public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
        this.configserverConfig = configserverConfig;
        return this;
    }

    public Builder withOrchestrator(Orchestrator orchestrator) {
        this.orchestrator = orchestrator;
        return this;
    }

    public Builder withLogRetriever(LogRetriever logRetriever) {
        this.logRetriever = logRetriever;
        return this;
    }

    public Builder withTesterClient(TesterClient testerClient) {
        this.testerClient = testerClient;
        return this;
    }

    public Builder withFlagSource(FlagSource flagSource) {
        this.flagSource = flagSource;
        return this;
    }

    public Builder withMetric(Metric metric) {
        this.metric = metric;
        return this;
    }

    public ApplicationRepository build() {
        return new ApplicationRepository(tenantRepository,
                                         hostProvisioner,
                                         InfraDeployerProvider.empty().getInfraDeployer(),
                                         new ConfigConvergenceChecker(),
                                         httpProxy,
                                         configserverConfig,
                                         orchestrator,
                                         logRetriever,
                                         clock,
                                         testerClient,
                                         metric,
                                         flagSource);
    }
}
// Builder for ApplicationRepository. Every collaborator not set explicitly falls back to
// the default initialized below. tenantRepository and orchestrator have no defaults and
// stay null unless set -- build() does not validate them (TODO confirm callers always set them).
class Builder {
    private TenantRepository tenantRepository;
    // null means "not set yet"; Optional.empty() means "explicitly no provisioner".
    private Optional<Provisioner> hostProvisioner;
    private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
    private Clock clock = Clock.systemUTC();
    private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
    private Orchestrator orchestrator;
    private LogRetriever logRetriever = new LogRetriever();
    private TesterClient testerClient = new TesterClient();
    private Metric metric = new NullMetric();
    private FlagSource flagSource = new InMemoryFlagSource();

    public Builder withTenantRepository(TenantRepository tenantRepository) {
        this.tenantRepository = tenantRepository;
        return this;
    }

    public Builder withClock(Clock clock) {
        this.clock = clock;
        return this;
    }

    // The provisioner may be set at most once, by this or by withHostProvisionerProvider.
    public Builder withProvisioner(Provisioner provisioner) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = Optional.ofNullable(provisioner);
        return this;
    }

    public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
        return this;
    }

    public Builder withHttpProxy(HttpProxy httpProxy) {
        this.httpProxy = httpProxy;
        return this;
    }

    public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
        this.configserverConfig = configserverConfig;
        return this;
    }

    public Builder withOrchestrator(Orchestrator orchestrator) {
        this.orchestrator = orchestrator;
        return this;
    }

    public Builder withLogRetriever(LogRetriever logRetriever) {
        this.logRetriever = logRetriever;
        return this;
    }

    public Builder withTesterClient(TesterClient testerClient) {
        this.testerClient = testerClient;
        return this;
    }

    public Builder withFlagSource(FlagSource flagSource) {
        this.flagSource = flagSource;
        return this;
    }

    public Builder withMetric(Metric metric) {
        this.metric = metric;
        return this;
    }

    public ApplicationRepository build() {
        return new ApplicationRepository(tenantRepository,
                                         hostProvisioner,
                                         InfraDeployerProvider.empty().getInfraDeployer(),
                                         new ConfigConvergenceChecker(),
                                         httpProxy,
                                         configserverConfig,
                                         orchestrator,
                                         logRetriever,
                                         clock,
                                         testerClient,
                                         metric,
                                         flagSource);
    }
}
Tried it, didn't make the flow clearer imo.
// Activates the given session for the application, deactivating the currently active one.
// Lock ordering is deliberate: the host provisioner's application lock (when present) is
// taken BEFORE the tenant's application-repo session lock -- do not reorder.
public Activation activate(Session session, ApplicationId applicationId, Tenant tenant, boolean force) {
    NestedTransaction transaction = new NestedTransaction();
    // Empty when there is no host provisioner; otherwise holds the provisioner lock,
    // which is released in the finally block below.
    Optional<ApplicationTransaction> applicationTransaction = hostProvisioner.map(provisioner -> provisioner.lock(applicationId))
                                                                             .map(lock -> new ApplicationTransaction(lock, transaction));
    try (var sessionLock = tenant.getApplicationRepo().lock(applicationId)) {
        Session activeSession = getActiveSession(applicationId);
        CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter();
        transaction.add(deactivateCurrentActivateNew(activeSession, session, force));
        if (applicationTransaction.isPresent()) {
            // NOTE(review): presumably the provisioner commits the wrapped transaction as
            // part of activate() -- confirm; only the else-branch commits explicitly here.
            hostProvisioner.get().activate(session.getAllocatedHosts().getHosts(),
                                           new ActivationContext(session.getSessionId()),
                                           applicationTransaction.get());
        } else {
            transaction.commit();
        }
        return new Activation(waiter, activeSession);
    } finally {
        // Always release the provisioner lock, even when activation fails.
        applicationTransaction.ifPresent(ApplicationTransaction::close);
    }
}
Optional<ApplicationTransaction> applicationTransaction = hostProvisioner.map(provisioner -> provisioner.lock(applicationId))
// Activates the given session for the application, deactivating the currently active one.
// Lock ordering is deliberate: the host provisioner's application lock (when present) is
// taken BEFORE the tenant's application-repo session lock -- do not reorder.
public Activation activate(Session session, ApplicationId applicationId, Tenant tenant, boolean force) {
    NestedTransaction transaction = new NestedTransaction();
    // Empty when there is no host provisioner; otherwise holds the provisioner lock,
    // which is released in the finally block below.
    Optional<ApplicationTransaction> applicationTransaction = hostProvisioner.map(provisioner -> provisioner.lock(applicationId))
                                                                             .map(lock -> new ApplicationTransaction(lock, transaction));
    try (var sessionLock = tenant.getApplicationRepo().lock(applicationId)) {
        Session activeSession = getActiveSession(applicationId);
        CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter();
        transaction.add(deactivateCurrentActivateNew(activeSession, session, force));
        if (applicationTransaction.isPresent()) {
            // NOTE(review): presumably the provisioner commits the wrapped transaction as
            // part of activate() -- confirm; only the else-branch commits explicitly here.
            hostProvisioner.get().activate(session.getAllocatedHosts().getHosts(),
                                           new ActivationContext(session.getSessionId()),
                                           applicationTransaction.get());
        } else {
            transaction.commit();
        }
        return new Activation(waiter, activeSession);
    } finally {
        // Always release the provisioner lock, even when activation fails.
        applicationTransaction.ifPresent(ApplicationTransaction::close);
    }
}
// Builder for ApplicationRepository. Every collaborator not set explicitly falls back to
// the default initialized below. tenantRepository and orchestrator have no defaults and
// stay null unless set -- build() does not validate them (TODO confirm callers always set them).
class Builder {
    private TenantRepository tenantRepository;
    // null means "not set yet"; Optional.empty() means "explicitly no provisioner".
    private Optional<Provisioner> hostProvisioner;
    private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
    private Clock clock = Clock.systemUTC();
    private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
    private Orchestrator orchestrator;
    private LogRetriever logRetriever = new LogRetriever();
    private TesterClient testerClient = new TesterClient();
    private Metric metric = new NullMetric();
    private FlagSource flagSource = new InMemoryFlagSource();

    public Builder withTenantRepository(TenantRepository tenantRepository) {
        this.tenantRepository = tenantRepository;
        return this;
    }

    public Builder withClock(Clock clock) {
        this.clock = clock;
        return this;
    }

    // The provisioner may be set at most once, by this or by withHostProvisionerProvider.
    public Builder withProvisioner(Provisioner provisioner) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = Optional.ofNullable(provisioner);
        return this;
    }

    public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
        return this;
    }

    public Builder withHttpProxy(HttpProxy httpProxy) {
        this.httpProxy = httpProxy;
        return this;
    }

    public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
        this.configserverConfig = configserverConfig;
        return this;
    }

    public Builder withOrchestrator(Orchestrator orchestrator) {
        this.orchestrator = orchestrator;
        return this;
    }

    public Builder withLogRetriever(LogRetriever logRetriever) {
        this.logRetriever = logRetriever;
        return this;
    }

    public Builder withTesterClient(TesterClient testerClient) {
        this.testerClient = testerClient;
        return this;
    }

    public Builder withFlagSource(FlagSource flagSource) {
        this.flagSource = flagSource;
        return this;
    }

    public Builder withMetric(Metric metric) {
        this.metric = metric;
        return this;
    }

    public ApplicationRepository build() {
        return new ApplicationRepository(tenantRepository,
                                         hostProvisioner,
                                         InfraDeployerProvider.empty().getInfraDeployer(),
                                         new ConfigConvergenceChecker(),
                                         httpProxy,
                                         configserverConfig,
                                         orchestrator,
                                         logRetriever,
                                         clock,
                                         testerClient,
                                         metric,
                                         flagSource);
    }
}
// Builder for ApplicationRepository. Every collaborator not set explicitly falls back to
// the default initialized below. tenantRepository and orchestrator have no defaults and
// stay null unless set -- build() does not validate them (TODO confirm callers always set them).
class Builder {
    private TenantRepository tenantRepository;
    // null means "not set yet"; Optional.empty() means "explicitly no provisioner".
    private Optional<Provisioner> hostProvisioner;
    private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
    private Clock clock = Clock.systemUTC();
    private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
    private Orchestrator orchestrator;
    private LogRetriever logRetriever = new LogRetriever();
    private TesterClient testerClient = new TesterClient();
    private Metric metric = new NullMetric();
    private FlagSource flagSource = new InMemoryFlagSource();

    public Builder withTenantRepository(TenantRepository tenantRepository) {
        this.tenantRepository = tenantRepository;
        return this;
    }

    public Builder withClock(Clock clock) {
        this.clock = clock;
        return this;
    }

    // The provisioner may be set at most once, by this or by withHostProvisionerProvider.
    public Builder withProvisioner(Provisioner provisioner) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = Optional.ofNullable(provisioner);
        return this;
    }

    public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
        return this;
    }

    public Builder withHttpProxy(HttpProxy httpProxy) {
        this.httpProxy = httpProxy;
        return this;
    }

    public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) {
        this.configserverConfig = configserverConfig;
        return this;
    }

    public Builder withOrchestrator(Orchestrator orchestrator) {
        this.orchestrator = orchestrator;
        return this;
    }

    public Builder withLogRetriever(LogRetriever logRetriever) {
        this.logRetriever = logRetriever;
        return this;
    }

    public Builder withTesterClient(TesterClient testerClient) {
        this.testerClient = testerClient;
        return this;
    }

    public Builder withFlagSource(FlagSource flagSource) {
        this.flagSource = flagSource;
        return this;
    }

    public Builder withMetric(Metric metric) {
        this.metric = metric;
        return this;
    }

    public ApplicationRepository build() {
        return new ApplicationRepository(tenantRepository,
                                         hostProvisioner,
                                         InfraDeployerProvider.empty().getInfraDeployer(),
                                         new ConfigConvergenceChecker(),
                                         httpProxy,
                                         configserverConfig,
                                         orchestrator,
                                         logRetriever,
                                         clock,
                                         testerClient,
                                         metric,
                                         flagSource);
    }
}
Ah, you were referring to whether it implemented equals/hashCode - not how to implement the isEmpty method? I think that is exactly what is done here, right?
/**
 * Returns true if every field of this info object is empty.
 *
 * Fix: the original concatenated all six strings into a throwaway temporary just to
 * call isEmpty() on it; checking each field individually is equivalent (a concatenation
 * is empty iff every part is empty), clearer, allocation-free, and short-circuits.
 * All fields are non-null (enforced in the constructor), so no null checks are needed.
 */
public boolean isEmpty() {
    return name.isEmpty() && email.isEmpty() && website.isEmpty()
            && contactEmail.isEmpty() && contactName.isEmpty() && invoiceEmail.isEmpty()
            && address.isEmpty() && billingContact.isEmpty();
}
return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty()
// All fields are non-null (enforced in the constructor), so value equality with the
// all-empty EMPTY constant means every field is empty. Relies on equals/hashCode.
public boolean isEmpty() {
    return this.equals(EMPTY);
}
/**
 * Immutable value object holding a tenant's contact and billing information.
 * All fields are non-null (enforced in the constructor); "unset" is modelled as the
 * empty string / empty sub-object. Each wither returns a new instance with one field
 * replaced.
 *
 * Fix: {@code EmptyInfo} was a mutable {@code public static} field; it is now
 * {@code final} so the shared constant cannot be reassigned (backward compatible).
 */
class TenantInfo {
    private final String name;
    private final String email;
    private final String website;
    private final String contactName;
    private final String contactEmail;
    private final String invoiceEmail;
    private final TenantInfoAddress address;
    private final TenantInfoBillingContact billingContact;

    TenantInfo(String name, String email, String website, String contactName, String contactEmail,
               String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) {
        this.name = Objects.requireNonNull(name);
        this.email = Objects.requireNonNull(email);
        this.website = Objects.requireNonNull(website);
        this.contactName = Objects.requireNonNull(contactName);
        this.contactEmail = Objects.requireNonNull(contactEmail);
        this.invoiceEmail = Objects.requireNonNull(invoiceEmail);
        this.address = Objects.requireNonNull(address);
        this.billingContact = Objects.requireNonNull(billingContact);
    }

    /** Shared all-empty instance; final so it cannot be reassigned. */
    public static final TenantInfo EmptyInfo = new TenantInfo("", "", "", "", "", "",
            TenantInfoAddress.EmptyAddress, TenantInfoBillingContact.EmptyBillingContact);

    public String name() { return name; }

    public String email() { return email; }

    public String website() { return website; }

    public String contactName() { return contactName; }

    public String contactEmail() { return contactEmail; }

    public String invoiceEmail() { return invoiceEmail; }

    public TenantInfoAddress address() { return address; }

    public TenantInfoBillingContact billingContact() { return billingContact; }

    public TenantInfo withName(String newName) {
        return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withEmail(String newEmail) {
        return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withWebsite(String newWebsite) {
        return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withContactName(String newContactName) {
        return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withContactEmail(String newContactEmail) {
        return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withInvoiceEmail(String newInvoiceEmail) {
        return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact);
    }

    public TenantInfo withAddress(TenantInfoAddress newAddress) {
        return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact);
    }

    public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) {
        return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact);
    }
}
/**
 * Immutable value object holding a tenant's contact and billing information.
 * All fields are non-null (enforced in the constructor); "unset" is modelled as the
 * empty string / empty sub-object. Each wither returns a new instance with one field
 * replaced. equals/hashCode compare all fields, which is what isEmpty() via the EMPTY
 * constant relies on.
 */
class TenantInfo {
    private final String name;
    private final String email;
    private final String website;
    private final String contactName;
    private final String contactEmail;
    private final String invoiceEmail;
    private final TenantInfoAddress address;
    private final TenantInfoBillingContact billingContact;

    TenantInfo(String name, String email, String website, String contactName, String contactEmail,
               String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) {
        this.name = Objects.requireNonNull(name);
        this.email = Objects.requireNonNull(email);
        this.website = Objects.requireNonNull(website);
        this.contactName = Objects.requireNonNull(contactName);
        this.contactEmail = Objects.requireNonNull(contactEmail);
        this.invoiceEmail = Objects.requireNonNull(invoiceEmail);
        this.address = Objects.requireNonNull(address);
        this.billingContact = Objects.requireNonNull(billingContact);
    }

    /** Shared all-empty instance. */
    public static final TenantInfo EMPTY = new TenantInfo("", "", "", "", "", "",
            TenantInfoAddress.EMPTY, TenantInfoBillingContact.EMPTY);

    public String name() { return name; }

    public String email() { return email; }

    public String website() { return website; }

    public String contactName() { return contactName; }

    public String contactEmail() { return contactEmail; }

    public String invoiceEmail() { return invoiceEmail; }

    public TenantInfoAddress address() { return address; }

    public TenantInfoBillingContact billingContact() { return billingContact; }

    public TenantInfo withName(String newName) {
        return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withEmail(String newEmail) {
        return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withWebsite(String newWebsite) {
        return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withContactName(String newContactName) {
        return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withContactEmail(String newContactEmail) {
        return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact);
    }

    public TenantInfo withInvoiceEmail(String newInvoiceEmail) {
        return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact);
    }

    public TenantInfo withAddress(TenantInfoAddress newAddress) {
        return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact);
    }

    public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) {
        return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        TenantInfo that = (TenantInfo) o;
        // All eight fields participate, consistent with hashCode below.
        return name.equals(that.name) &&
                email.equals(that.email) &&
                website.equals(that.website) &&
                contactName.equals(that.contactName) &&
                contactEmail.equals(that.contactEmail) &&
                invoiceEmail.equals(that.invoiceEmail) &&
                address.equals(that.address) &&
                billingContact.equals(that.billingContact);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, email, website, contactName, contactEmail, invoiceEmail, address, billingContact);
    }
}
Not sure how we ended up with this (Initially from #13267)... I guess this is the result after dividing by average number of containers on host or something. If so, I guess we need to somehow do something similar for shared hosts in dynamically provisioned zones :tired_face:
/**
 * Returns the thin-pool size (in base-2 Gb) to reserve for the given storage type:
 * computed by the resources calculator for local disk in dynamically provisioned
 * zones, a fixed 4 otherwise.
 */
private long getThinPoolSize(NodeResources.StorageType storageType) {
    boolean localDiskOnDynamicHost = storageType == NodeResources.StorageType.local
                                     && zone().getCloud().dynamicProvisioning();
    if ( ! localDiskOnDynamicHost) return 4;
    return nodeRepository.resourcesCalculator().thinPoolSizeInBase2Gb(zone(), NodeType.host);
}
return 4;
/**
 * Returns the thin-pool size (in base-2 Gb) to reserve for the given storage type:
 * computed by the resources calculator for local disk in dynamically provisioned
 * zones, a fixed 4 otherwise.
 */
private long getThinPoolSize(NodeResources.StorageType storageType) {
    boolean localDiskOnDynamicHost = storageType == NodeResources.StorageType.local
                                     && zone().getCloud().dynamicProvisioning();
    if ( ! localDiskOnDynamicHost) return 4;
    return nodeRepository.resourcesCalculator().thinPoolSizeInBase2Gb(NodeType.host);
}
// Enforces minimum node resource limits, in two resource spaces: "advertised" (what
// applications request) and "real" (what is actually usable on the node after overhead).
class NodeResourceLimits {

    private final NodeRepository nodeRepository;

    public NodeResourceLimits(NodeRepository nodeRepository) {
        this.nodeRepository = nodeRepository;
    }

    /** Validates the resources applications ask for (which are in "advertised" resource space) */
    public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
        if (requested.isUnspecified()) return;

        if (requested.vcpu() < minAdvertisedVcpu())
            illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu());
        if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
            illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
        if (requested.diskGb() < minAdvertisedDiskGb(requested))
            illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested));
    }

    /** Returns whether the real resources we'll end up with on a given tenant node are within limits */
    public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
        return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
                                  cluster.type());
    }

    /** Returns whether the real resources we'll end up with on a given tenant node are within limits */
    public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
        if (realResources.isUnspecified()) return true;

        if (realResources.vcpu() < minRealVcpu()) return false;
        if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
        if (realResources.diskGb() < minRealDiskGb()) return false;
        return true;
    }

    // Bumps each requested resource up to its advertised minimum; never shrinks anything.
    public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType) {
        if (requested.isUnspecified()) return requested;

        return requested.withVcpu(Math.max(minAdvertisedVcpu(), requested.vcpu()))
                        .withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
                        .withDiskGb(Math.max(minAdvertisedDiskGb(requested), requested.diskGb()));
    }

    // Dev (non-dynamically-provisioned) allows tiny nodes; everything else needs 0.5 vcpu.
    private double minAdvertisedVcpu() {
        if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
        return 0.5;
    }

    private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
        if (zone().system() == SystemName.dev) return 1; // Allow small containers in dev system
        if (clusterType == ClusterSpec.Type.admin) return 2;
        return 4;
    }

    // Advertised disk = real minimum plus the thin-pool reservation for this storage type.
    private double minAdvertisedDiskGb(NodeResources requested) {
        return minRealDiskGb() + getThinPoolSize(requested.storageType());
    }

    private double minRealVcpu() { return minAdvertisedVcpu(); }

    // NOTE(review): the 1.7 Gb subtracted here presumably accounts for per-node memory
    // overhead (host/runtime) -- confirm where this constant comes from.
    private double minRealMemoryGb(ClusterSpec.Type clusterType) {
        return minAdvertisedMemoryGb(clusterType) - 1.7;
    }

    private double minRealDiskGb() { return 6; }

    private Zone zone() { return nodeRepository.zone(); }

    // Throws IllegalArgumentException describing which resource of which cluster is too small.
    private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
        if ( ! unit.isEmpty())
            unit = " " + unit;
        String message = String.format(Locale.ENGLISH,
                                       "%s cluster '%s': " + type + " " + resource +
                                       " size is %.2f%s but must be at least %.2f%s",
                                       cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
        throw new IllegalArgumentException(message);
    }
}
// Enforces minimum node resource limits, in two resource spaces: "advertised" (what
// applications request) and "real" (what is actually usable on the node after overhead).
class NodeResourceLimits {

    private final NodeRepository nodeRepository;

    public NodeResourceLimits(NodeRepository nodeRepository) {
        this.nodeRepository = nodeRepository;
    }

    /** Validates the resources applications ask for (which are in "advertised" resource space) */
    public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
        if (requested.isUnspecified()) return;

        if (requested.vcpu() < minAdvertisedVcpu())
            illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu());
        if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
            illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
        if (requested.diskGb() < minAdvertisedDiskGb(requested))
            illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested));
    }

    /** Returns whether the real resources we'll end up with on a given tenant node are within limits */
    public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
        return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
                                  cluster.type());
    }

    /** Returns whether the real resources we'll end up with on a given tenant node are within limits */
    public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
        if (realResources.isUnspecified()) return true;

        if (realResources.vcpu() < minRealVcpu()) return false;
        if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
        if (realResources.diskGb() < minRealDiskGb()) return false;
        return true;
    }

    // Bumps each requested resource up to its advertised minimum; never shrinks anything.
    public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType) {
        if (requested.isUnspecified()) return requested;

        return requested.withVcpu(Math.max(minAdvertisedVcpu(), requested.vcpu()))
                        .withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
                        .withDiskGb(Math.max(minAdvertisedDiskGb(requested), requested.diskGb()));
    }

    // Dev (non-dynamically-provisioned) allows tiny nodes; everything else needs 0.5 vcpu.
    private double minAdvertisedVcpu() {
        if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
        return 0.5;
    }

    private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
        if (zone().system() == SystemName.dev) return 1; // Allow small containers in dev system
        if (clusterType == ClusterSpec.Type.admin) return 2;
        return 4;
    }

    // Advertised disk = real minimum plus the thin-pool reservation for this storage type.
    private double minAdvertisedDiskGb(NodeResources requested) {
        return minRealDiskGb() + getThinPoolSize(requested.storageType());
    }

    private double minRealVcpu() { return minAdvertisedVcpu(); }

    // NOTE(review): the 1.7 Gb subtracted here presumably accounts for per-node memory
    // overhead (host/runtime) -- confirm where this constant comes from.
    private double minRealMemoryGb(ClusterSpec.Type clusterType) {
        return minAdvertisedMemoryGb(clusterType) - 1.7;
    }

    private double minRealDiskGb() { return 6; }

    private Zone zone() { return nodeRepository.zone(); }

    // Throws IllegalArgumentException describing which resource of which cluster is too small.
    private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
        if ( ! unit.isEmpty())
            unit = " " + unit;
        String message = String.format(Locale.ENGLISH,
                                       "%s cluster '%s': " + type + " " + resource +
                                       " size is %.2f%s but must be at least %.2f%s",
                                       cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
        throw new IllegalArgumentException(message);
    }
}
OK, need to look into that, then
/**
 * Returns the thin-pool size (in base-2 Gb) to reserve for the given storage type:
 * computed by the resources calculator for local disk in dynamically provisioned
 * zones, a fixed 4 otherwise.
 */
private long getThinPoolSize(NodeResources.StorageType storageType) {
    boolean localDiskOnDynamicHost = storageType == NodeResources.StorageType.local
                                     && zone().getCloud().dynamicProvisioning();
    if ( ! localDiskOnDynamicHost) return 4;
    return nodeRepository.resourcesCalculator().thinPoolSizeInBase2Gb(zone(), NodeType.host);
}
return 4;
/**
 * Returns the thin-pool size (in base-2 Gb) to reserve for the given storage type:
 * computed by the resources calculator for local disk in dynamically provisioned
 * zones, a fixed 4 otherwise.
 */
private long getThinPoolSize(NodeResources.StorageType storageType) {
    boolean localDiskOnDynamicHost = storageType == NodeResources.StorageType.local
                                     && zone().getCloud().dynamicProvisioning();
    if ( ! localDiskOnDynamicHost) return 4;
    return nodeRepository.resourcesCalculator().thinPoolSizeInBase2Gb(NodeType.host);
}
// Enforces minimum node resource limits, in two resource spaces: "advertised" (what
// applications request) and "real" (what is actually usable on the node after overhead).
class NodeResourceLimits {

    private final NodeRepository nodeRepository;

    public NodeResourceLimits(NodeRepository nodeRepository) {
        this.nodeRepository = nodeRepository;
    }

    /** Validates the resources applications ask for (which are in "advertised" resource space) */
    public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
        if (requested.isUnspecified()) return;

        if (requested.vcpu() < minAdvertisedVcpu())
            illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu());
        if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
            illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
        if (requested.diskGb() < minAdvertisedDiskGb(requested))
            illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested));
    }

    /** Returns whether the real resources we'll end up with on a given tenant node are within limits */
    public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
        return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
                                  cluster.type());
    }

    /** Returns whether the real resources we'll end up with on a given tenant node are within limits */
    public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
        if (realResources.isUnspecified()) return true;

        if (realResources.vcpu() < minRealVcpu()) return false;
        if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
        if (realResources.diskGb() < minRealDiskGb()) return false;
        return true;
    }

    // Bumps each requested resource up to its advertised minimum; never shrinks anything.
    public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType) {
        if (requested.isUnspecified()) return requested;

        return requested.withVcpu(Math.max(minAdvertisedVcpu(), requested.vcpu()))
                        .withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
                        .withDiskGb(Math.max(minAdvertisedDiskGb(requested), requested.diskGb()));
    }

    // Dev (non-dynamically-provisioned) allows tiny nodes; everything else needs 0.5 vcpu.
    private double minAdvertisedVcpu() {
        if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
        return 0.5;
    }

    private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
        if (zone().system() == SystemName.dev) return 1; // Allow small containers in dev system
        if (clusterType == ClusterSpec.Type.admin) return 2;
        return 4;
    }

    // Advertised disk = real minimum plus the thin-pool reservation for this storage type.
    private double minAdvertisedDiskGb(NodeResources requested) {
        return minRealDiskGb() + getThinPoolSize(requested.storageType());
    }

    private double minRealVcpu() { return minAdvertisedVcpu(); }

    // NOTE(review): the 1.7 Gb subtracted here presumably accounts for per-node memory
    // overhead (host/runtime) -- confirm where this constant comes from.
    private double minRealMemoryGb(ClusterSpec.Type clusterType) {
        return minAdvertisedMemoryGb(clusterType) - 1.7;
    }

    private double minRealDiskGb() { return 6; }

    private Zone zone() { return nodeRepository.zone(); }

    // Throws IllegalArgumentException describing which resource of which cluster is too small.
    private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
        if ( ! unit.isEmpty())
            unit = " " + unit;
        String message = String.format(Locale.ENGLISH,
                                       "%s cluster '%s': " + type + " " + resource +
                                       " size is %.2f%s but must be at least %.2f%s",
                                       cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
        throw new IllegalArgumentException(message);
    }
}
class NodeResourceLimits { private final NodeRepository nodeRepository; public NodeResourceLimits(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; } /** Validates the resources applications ask for (which are in "advertised" resource space) */ public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) { if (requested.isUnspecified()) return; if (requested.vcpu() < minAdvertisedVcpu()) illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu()); if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type())) illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type())); if (requested.diskGb() < minAdvertisedDiskGb(requested)) illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested)); } /** Returns whether the real resources we'll end up with on a given tenant node are within limits */ public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) { return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository), cluster.type()); } /** Returns whether the real resources we'll end up with on a given tenant node are within limits */ public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) { if (realResources.isUnspecified()) return true; if (realResources.vcpu() < minRealVcpu()) return false; if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false; if (realResources.diskGb() < minRealDiskGb()) return false; return true; } public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType) { if (requested.isUnspecified()) return requested; return requested.withVcpu(Math.max(minAdvertisedVcpu(), requested.vcpu())) .withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb())) .withDiskGb(Math.max(minAdvertisedDiskGb(requested), requested.diskGb())); } private 
double minAdvertisedVcpu() { if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1; return 0.5; } private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) { if (zone().system() == SystemName.dev) return 1; if (clusterType == ClusterSpec.Type.admin) return 2; return 4; } private double minAdvertisedDiskGb(NodeResources requested) { return minRealDiskGb() + getThinPoolSize(requested.storageType()); } private double minRealVcpu() { return minAdvertisedVcpu(); } private double minRealMemoryGb(ClusterSpec.Type clusterType) { return minAdvertisedMemoryGb(clusterType) - 1.7; } private double minRealDiskGb() { return 6; } private Zone zone() { return nodeRepository.zone(); } private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) { if ( ! unit.isEmpty()) unit = " " + unit; String message = String.format(Locale.ENGLISH, "%s cluster '%s': " + type + " " + resource + " size is %.2f%s but must be at least %.2f%s", cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit); throw new IllegalArgumentException(message); } }
Should set a non-empty `TenantInfo` here, and assert the re-serialized value below to ensure that serialization is correct.
public void cloud_tenant() { CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"), Optional.of(new SimplePrincipal("foobar-user")), ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"), otherPublicKey, new SimplePrincipal("jane")), TenantInfo.EmptyInfo); CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.creator(), serialized.creator()); assertEquals(tenant.developerKeys(), serialized.developerKeys()); }
TenantInfo.EmptyInfo);
public void cloud_tenant() { CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"), Optional.of(new SimplePrincipal("foobar-user")), ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"), otherPublicKey, new SimplePrincipal("jane")), TenantInfo.EMPTY); CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.creator(), serialized.creator()); assertEquals(tenant.developerKeys(), serialized.developerKeys()); }
class TenantSerializerTest { private static final TenantSerializer serializer = new TenantSerializer(); private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" + "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" + "-----END PUBLIC KEY-----\n"); private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" + "pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" + "-----END PUBLIC KEY-----\n"); @Test public void athenz_tenant() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1"))); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.domain(), serialized.domain()); assertEquals(tenant.property(), serialized.property()); assertTrue(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_without_property_id() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.empty()); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertFalse(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_with_contact() { AthenzTenant tenant = new AthenzTenant(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1")), Optional.of(contact())); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.contact(), 
serialized.contact()); } @Test private Contact contact() { return new Contact( URI.create("http: URI.create("http: URI.create("http: List.of( Collections.singletonList("person1"), Collections.singletonList("person2") ), "queue", Optional.empty() ); } }
class TenantSerializerTest { private static final TenantSerializer serializer = new TenantSerializer(); private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" + "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" + "-----END PUBLIC KEY-----\n"); private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" + "pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" + "-----END PUBLIC KEY-----\n"); @Test public void athenz_tenant() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1"))); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.domain(), serialized.domain()); assertEquals(tenant.property(), serialized.property()); assertTrue(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_without_property_id() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.empty()); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertFalse(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_with_contact() { AthenzTenant tenant = new AthenzTenant(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1")), Optional.of(contact())); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.contact(), 
serialized.contact()); } @Test @Test public void cloud_tenant_with_info() { CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"), Optional.of(new SimplePrincipal("foobar-user")), ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"), otherPublicKey, new SimplePrincipal("jane")), TenantInfo.EMPTY.withName("Ofni Tnanet")); CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.info(), serialized.info()); } @Test public void cloud_tenant_with_tenant_info_partial() { TenantInfo partialInfo = TenantInfo.EMPTY .withAddress(TenantInfoAddress.EMPTY.withCity("Hønefoss")); Slime slime = new Slime(); Cursor parentObject = slime.setObject(); serializer.toSlime(partialInfo, parentObject); assertEquals("{\"info\":{\"name\":\"\",\"email\":\"\",\"website\":\"\",\"invoiceEmail\":\"\",\"contactName\":\"\",\"contactEmail\":\"\",\"address\":{\"addressLines\":\"\",\"postalCodeOrZip\":\"\",\"city\":\"Hønefoss\",\"stateRegionProvince\":\"\",\"country\":\"\"}}}", slime.toString()); } @Test public void cloud_tenant_with_tenant_info_full() { TenantInfo fullInfo = TenantInfo.EMPTY .withName("My Company") .withEmail("email@mycomp.any") .withWebsite("http: .withContactEmail("ceo@mycomp.any") .withContactName("My Name") .withInvoiceEmail("invoice@mycomp.any") .withAddress(TenantInfoAddress.EMPTY .withCity("Hønefoss") .withAddressLines("Riperbakken 2") .withCountry("Norway") .withPostalCodeOrZip("3510") .withStateRegionProvince("Viken")) .withBillingContact(TenantInfoBillingContact.EMPTY .withEmail("thomas@sodor.com") .withName("Thomas The Tank Engine") .withPhone("NA") .withAddress(TenantInfoAddress.EMPTY .withCity("Suddery") .withCountry("Sodor") .withAddressLines("Central Station") .withStateRegionProvince("Irish Sea"))); Slime slime = new Slime(); Cursor parentCursor = slime.setObject(); serializer.toSlime(fullInfo, parentCursor); TenantInfo roundTripInfo = serializer.tenantInfoFromSlime(parentCursor.field("info")); 
assertEquals(fullInfo, roundTripInfo); } private Contact contact() { return new Contact( URI.create("http: URI.create("http: URI.create("http: List.of( Collections.singletonList("person1"), Collections.singletonList("person2") ), "queue", Optional.empty() ); } }
Good catch! Could you give a hint to what part of testing could be simplified due to this?
public boolean isEmpty() { return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty() && address.isEmpty() && billingContact.isEmpty(); }
return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty()
public boolean isEmpty() { return this.equals(EMPTY); }
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static TenantInfo EmptyInfo = new TenantInfo("","","", "", "", "", TenantInfoAddress.EmptyAddress, TenantInfoBillingContact.EmptyBillingContact); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo 
withContactName(String newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } }
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static final TenantInfo EMPTY = new TenantInfo("","","", "", "", "", TenantInfoAddress.EMPTY, TenantInfoBillingContact.EMPTY); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactName(String 
newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TenantInfo that = (TenantInfo) o; return name.equals(that.name) && email.equals(that.email) && website.equals(that.website) && contactName.equals(that.contactName) && contactEmail.equals(that.contactEmail) && invoiceEmail.equals(that.invoiceEmail) && address.equals(that.address) && billingContact.equals(that.billingContact); } @Override public int hashCode() { return Objects.hash(name, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } }
If you add a fully populated `TenantInfo` to `@Test public void cloud_tenant()` you can do do `assertEquals(tenant.tenantInfo(), serialized.tenantInfo())` to test that serialization works as expected in both directions.
public boolean isEmpty() { return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty() && address.isEmpty() && billingContact.isEmpty(); }
return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty()
public boolean isEmpty() { return this.equals(EMPTY); }
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static TenantInfo EmptyInfo = new TenantInfo("","","", "", "", "", TenantInfoAddress.EmptyAddress, TenantInfoBillingContact.EmptyBillingContact); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo 
withContactName(String newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } }
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static final TenantInfo EMPTY = new TenantInfo("","","", "", "", "", TenantInfoAddress.EMPTY, TenantInfoBillingContact.EMPTY); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactName(String 
newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TenantInfo that = (TenantInfo) o; return name.equals(that.name) && email.equals(that.email) && website.equals(that.website) && contactName.equals(that.contactName) && contactEmail.equals(that.contactEmail) && invoiceEmail.equals(that.invoiceEmail) && address.equals(that.address) && billingContact.equals(that.billingContact); } @Override public int hashCode() { return Objects.hash(name, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } }
Hmm, probably, yes ...
private CloudName findCloud(JobType job) { return zones.zones().all().among(job.zone()).zones().stream().findFirst().map(ZoneApi::getCloudName).orElse(null); }
return zones.zones().all().among(job.zone()).zones().stream().findFirst().map(ZoneApi::getCloudName).orElse(null);
private CloudName findCloud(JobType job) { return zones.zones().all().get(job.zone()).map(ZoneApi::getCloudName).orElse(null); }
class DeploymentStatus { private static <T> List<T> union(List<T> first, List<T> second) { return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList()); } private final Application application; private final JobList allJobs; private final VersionStatus versionStatus; private final Version systemVersion; private final Function<InstanceName, VersionCompatibility> versionCompatibility; private final ZoneRegistry zones; private final Instant now; private final Map<JobId, StepStatus> jobSteps; private final List<StepStatus> allSteps; public DeploymentStatus(Application application, Function<JobId, JobStatus> allJobs, ZoneRegistry zones, VersionStatus versionStatus, Version systemVersion, Function<InstanceName, VersionCompatibility> versionCompatibility, Instant now) { this.application = requireNonNull(application); this.zones = zones; this.versionStatus = requireNonNull(versionStatus); this.systemVersion = requireNonNull(systemVersion); this.versionCompatibility = versionCompatibility; this.now = requireNonNull(now); List<StepStatus> allSteps = new ArrayList<>(); Map<JobId, JobStatus> jobs = new HashMap<>(); this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps, job -> jobs.computeIfAbsent(job, allJobs)); this.allSteps = Collections.unmodifiableList(allSteps); this.allJobs = JobList.from(jobSteps.keySet().stream().map(allJobs).collect(toList())); } private JobType systemTest(JobType dependent) { return JobType.systemTest(zones, dependent == null ? null : findCloud(dependent)); } private JobType stagingTest(JobType dependent) { return JobType.stagingTest(zones, dependent == null ? null : findCloud(dependent)); } /** The application this deployment status concerns. */ public Application application() { return application; } /** A filterable list of the status of all jobs for this application. 
*/ public JobList jobs() { return allJobs; } /** Whether any jobs both dependent on the dependency, and a dependency for the dependent, are failing. */ private boolean hasFailures(StepStatus dependency, StepStatus dependent) { Set<StepStatus> dependents = new HashSet<>(); fillDependents(dependency, new HashSet<>(), dependents, dependent); Set<JobId> criticalJobs = dependents.stream().flatMap(step -> step.job().stream()).collect(toSet()); return ! allJobs.matching(job -> criticalJobs.contains(job.id())) .failingHard() .isEmpty(); } private boolean fillDependents(StepStatus dependency, Set<StepStatus> visited, Set<StepStatus> dependents, StepStatus current) { if (visited.contains(current)) return dependents.contains(current); if (dependency == current) dependents.add(current); else for (StepStatus dep : current.dependencies) if (fillDependents(dependency, visited, dependents, dep)) dependents.add(current); visited.add(current); return dependents.contains(current); } /** Whether any job is failing on versions selected by the given filter, with errors other than lack of capacity in a test zone.. */ public boolean hasFailures(Predicate<RevisionId> revisionFilter) { return ! allJobs.failingHard() .matching(job -> revisionFilter.test(job.lastTriggered().get().versions().targetRevision())) .isEmpty(); } /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */ public boolean hasFailures() { return ! allJobs.failingHard().isEmpty(); } /** All job statuses, by job type, for the given instance. */ public Map<JobType, JobStatus> instanceJobs(InstanceName instance) { return allJobs.asList().stream() .filter(job -> job.id().application().equals(application.id().instance(instance))) .collect(CustomCollectors.toLinkedMap(job -> job.id().type(), Function.identity())); } /** Filterable job status lists for each instance of this application. 
*/ public Map<ApplicationId, JobList> instanceJobs() { return allJobs.groupingBy(job -> job.id().application()); } /** * The set of jobs that need to run for the changes of each instance of the application to be considered complete, * and any test jobs for any outstanding change, which will likely be needed to later deploy this change. */ public Map<JobId, List<Job>> jobsToRun() { if (application.revisions().last().isEmpty()) return Map.of(); Map<InstanceName, Change> changes = new LinkedHashMap<>(); for (InstanceName instance : application.deploymentSpec().instanceNames()) changes.put(instance, application.require(instance).change()); Map<JobId, List<Job>> jobs = jobsToRun(changes); Map<InstanceName, Change> outstandingChanges = new LinkedHashMap<>(); for (InstanceName instance : application.deploymentSpec().instanceNames()) { Change outstanding = outstandingChange(instance); if (outstanding.hasTargets()) outstandingChanges.put(instance, outstanding.onTopOf(application.require(instance).change())); } var testJobs = jobsToRun(outstandingChanges, true).entrySet().stream() .filter(entry -> ! entry.getKey().type().isProduction()); return Stream.concat(jobs.entrySet().stream(), testJobs) .collect(collectingAndThen(toMap(Map.Entry::getKey, Map.Entry::getValue, DeploymentStatus::union, LinkedHashMap::new), Collections::unmodifiableMap)); } private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) { if (application.revisions().last().isEmpty()) return Map.of(); Map<JobId, List<Job>> productionJobs = new LinkedHashMap<>(); changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests))); Map<JobId, List<Job>> testJobs = testJobs(productionJobs); Map<JobId, List<Job>> jobs = new LinkedHashMap<>(testJobs); jobs.putAll(productionJobs); jobSteps.forEach((job, step) -> { if ( ! 
step.isDeclared() || job.type().isProduction() || jobs.containsKey(job)) return; Change change = changes.get(job.application().instance()); if (change == null || ! change.hasTargets()) return; Collection<Optional<JobId>> firstProductionJobsWithDeployment = jobSteps.keySet().stream() .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment()) .filter(jobId -> deploymentFor(jobId).isPresent()) .collect(groupingBy(jobId -> findCloud(jobId.type()), Collectors.reducing((o, n) -> o))) .values(); if (firstProductionJobsWithDeployment.isEmpty()) firstProductionJobsWithDeployment = List.of(Optional.empty()); for (Optional<JobId> firstProductionJobWithDeploymentInCloud : firstProductionJobsWithDeployment) { Versions versions = Versions.from(change, application, firstProductionJobWithDeploymentInCloud.flatMap(this::deploymentFor), fallbackPlatform(change, job)); if (step.completedAt(change, firstProductionJobWithDeploymentInCloud).isEmpty()) { JobType actualType = job.type().isSystemTest() ? systemTest(firstProductionJobWithDeploymentInCloud.map(JobId::type).orElse(null)) : stagingTest(firstProductionJobWithDeploymentInCloud.map(JobId::type).orElse(null)); jobs.merge(job, List.of(new Job(actualType, versions, step.readyAt(change), change)), DeploymentStatus::union); } } }); return Collections.unmodifiableMap(jobs); } /** Fall back to the newest, deployable platform, which is compatible with what we want to deploy. 
*/ public Version fallbackPlatform(Change change, JobId job) { Optional<Version> compileVersion = change.revision().map(application.revisions()::get).flatMap(ApplicationVersion::compileVersion); if (compileVersion.isEmpty()) return systemVersion; for (VespaVersion version : reversed(versionStatus.deployableVersions())) if (versionCompatibility.apply(job.application().instance()).accept(version.versionNumber(), compileVersion.get())) return version.versionNumber(); throw new IllegalArgumentException("no legal platform version exists in this system for compile version " + compileVersion.get()); } /** The set of jobs that need to run for the given changes to be considered complete. */ public boolean hasCompleted(InstanceName instance, Change change) { if ( ! application.deploymentSpec().requireInstance(instance).concerns(prod)) { if (newestTested(instance, run -> run.versions().targetRevision()).map(change::downgrades).orElse(false)) return true; if (newestTested(instance, run -> run.versions().targetPlatform()).map(change::downgrades).orElse(false)) return true; } return jobsToRun(Map.of(instance, change), false).isEmpty(); } /** The set of jobs that need to run for the given changes to be considered complete. */ private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes) { return jobsToRun(changes, false); } /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. */ public Map<JobId, StepStatus> jobSteps() { return jobSteps; } public Map<InstanceName, StepStatus> instanceSteps() { ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder(); for (StepStatus status : allSteps) if (status instanceof InstanceStatus) instances.put(status.instance(), status); return instances.build(); } /** The step status for all relevant steps in the deployment spec of this, in the same order as in the deployment spec. 
*/ public List<StepStatus> allSteps() { return allSteps; } public Optional<Deployment> deploymentFor(JobId job) { return Optional.ofNullable(application.require(job.application().instance()) .deployments().get(job.type().zone())); } private <T extends Comparable<T>> Optional<T> newestTested(InstanceName instance, Function<Run, T> runMapper) { Set<CloudName> clouds = jobSteps.keySet().stream() .filter(job -> job.type().isProduction()) .map(job -> findCloud(job.type())) .collect(toSet()); List<ZoneId> testZones = new ArrayList<>(); if (application.deploymentSpec().requireInstance(instance).concerns(test)) { if (clouds.isEmpty()) testZones.add(JobType.systemTest(zones, null).zone()); else for (CloudName cloud: clouds) testZones.add(JobType.systemTest(zones, cloud).zone()); } if (application.deploymentSpec().requireInstance(instance).concerns(staging)) { if (clouds.isEmpty()) testZones.add(JobType.stagingTest(zones, null).zone()); else for (CloudName cloud: clouds) testZones.add(JobType.stagingTest(zones, cloud).zone()); } Map<ZoneId, Optional<T>> newestPerZone = instanceJobs().get(application.id().instance(instance)) .type(systemTest(null), stagingTest(null)) .asList().stream().flatMap(jobs -> jobs.runs().values().stream()) .filter(Run::hasSucceeded) .collect(groupingBy(run -> run.id().type().zone(), mapping(runMapper, Collectors.maxBy(naturalOrder())))); return newestPerZone.keySet().containsAll(testZones) ? testZones.stream().map(newestPerZone::get) .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().compareTo(o.get()) < 0 ? n : o) .orElse(Optional.empty()) : Optional.empty(); } /** * The change to a revision which all dependencies of the given instance has completed, * which does not downgrade any deployments in the instance, * which is not already rolling out to the instance, and * which causes at least one job to run if deployed to the instance. 
* For the "exclusive" revision upgrade policy it is the oldest such revision; otherwise, it is the latest. */ public Change outstandingChange(InstanceName instance) { StepStatus status = instanceSteps().get(instance); if (status == null) return Change.empty(); DeploymentInstanceSpec spec = application.deploymentSpec().requireInstance(instance); boolean ascending = next == spec.revisionTarget(); int cumulativeRisk = 0; int nextRisk = 0; int skippedCumulativeRisk = 0; Instant readySince = now; Optional<RevisionId> newestRevision = application.productionDeployments() .getOrDefault(instance, List.of()).stream() .map(Deployment::revision).max(naturalOrder()); Change candidate = Change.empty(); for (ApplicationVersion version : application.revisions().deployable(ascending)) { Change change = Change.of(version.id()); if ( newestRevision.isPresent() && change.downgrades(newestRevision.get()) || ! application.require(instance).change().revision().map(change::upgrades).orElse(true) || hasCompleted(instance, change)) { if (ascending) continue; else return Change.empty(); } skippedCumulativeRisk += version.risk(); nextRisk = nextRisk > 0 ? nextRisk : version.risk(); Optional<Instant> readyAt = status.dependenciesCompletedAt(Change.of(version.id()), Optional.empty()); if (readyAt.map(now::isBefore).orElse(true)) continue; cumulativeRisk += skippedCumulativeRisk; skippedCumulativeRisk = 0; nextRisk = 0; if (cumulativeRisk >= spec.maxRisk()) return candidate.equals(Change.empty()) ? change : candidate; if (readyAt.get().isBefore(readySince)) readySince = readyAt.get(); candidate = change; } return instanceJobs(instance).values().stream().allMatch(jobs -> jobs.lastTriggered().isEmpty()) || cumulativeRisk >= spec.minRisk() || cumulativeRisk + nextRisk > spec.maxRisk() || ! now.isBefore(readySince.plus(Duration.ofHours(spec.maxIdleHours()))) ? 
candidate : Change.empty(); } /** Earliest instant when job was triggered with given versions, or both system and staging tests were successful. */ public Optional<Instant> verifiedAt(JobId job, Versions versions) { Optional<Instant> triggeredAt = allJobs.get(job) .flatMap(status -> status.runs().values().stream() .filter(run -> run.versions().equals(versions)) .findFirst()) .map(Run::start); Optional<Instant> systemTestedAt = testedAt(job.application(), systemTest(null), versions); Optional<Instant> stagingTestedAt = testedAt(job.application(), stagingTest(null), versions); if (systemTestedAt.isEmpty() || stagingTestedAt.isEmpty()) return triggeredAt; Optional<Instant> testedAt = systemTestedAt.get().isAfter(stagingTestedAt.get()) ? systemTestedAt : stagingTestedAt; return triggeredAt.isPresent() && triggeredAt.get().isBefore(testedAt.get()) ? triggeredAt : testedAt; } /** Earliest instant when versions were tested for the given instance */ private Optional<Instant> testedAt(ApplicationId instance, JobType type, Versions versions) { return declaredTest(instance, type).map(__ -> allJobs.instance(instance.instance())) .orElse(allJobs) .type(type).asList().stream() .flatMap(status -> RunList.from(status) .on(versions) .matching(run -> run.id().type().zone().equals(type.zone())) .matching(Run::hasSucceeded) .asList().stream() .map(Run::start)) .min(naturalOrder()); } private Map<JobId, List<Job>> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) { Map<JobId, List<Job>> jobs = new LinkedHashMap<>(); jobSteps.forEach((job, step) -> { if ( ! job.application().instance().equals(instance) || ! 
job.type().isProduction()) return; if (step.completedAt(change, Optional.of(job)).isPresent()) return; Optional<Deployment> deployment = deploymentFor(job); Optional<Version> existingPlatform = deployment.map(Deployment::version); Optional<RevisionId> existingRevision = deployment.map(Deployment::revision); boolean deployingCompatibilityChange = areIncompatible(existingPlatform, change.revision(), job) || areIncompatible(change.platform(), existingRevision, job); if (assumeUpgradesSucceed) { if (deployingCompatibilityChange) return; Change currentChange = application.require(instance).change(); Versions target = Versions.from(currentChange, application, deployment, fallbackPlatform(currentChange, job)); existingPlatform = Optional.of(target.targetPlatform()); existingRevision = Optional.of(target.targetRevision()); } List<Job> toRun = new ArrayList<>(); List<Change> changes = deployingCompatibilityChange ? List.of(change) : changes(job, step, change); for (Change partial : changes) { Job jobToRun = new Job(job.type(), Versions.from(partial, application, existingPlatform, existingRevision, fallbackPlatform(partial, job)), step.readyAt(partial, Optional.of(job)), partial); toRun.add(jobToRun); existingPlatform = Optional.of(jobToRun.versions.targetPlatform()); existingRevision = Optional.of(jobToRun.versions.targetRevision()); } jobs.put(job, toRun); }); return jobs; } private boolean areIncompatible(Optional<Version> platform, Optional<RevisionId> revision, JobId job) { Optional<Version> compileVersion = revision.map(application.revisions()::get) .flatMap(ApplicationVersion::compileVersion); return platform.isPresent() && compileVersion.isPresent() && versionCompatibility.apply(job.application().instance()).refuse(platform.get(), compileVersion.get()); } /** Changes to deploy with the given job, possibly split in two steps. 
*/ private List<Change> changes(JobId job, StepStatus step, Change change) { if (change.platform().isEmpty() || change.revision().isEmpty() || change.isPinned()) return List.of(change); if ( step.completedAt(change.withoutApplication(), Optional.of(job)).isPresent() || step.completedAt(change.withoutPlatform(), Optional.of(job)).isPresent()) return List.of(change); JobId deployment = new JobId(job.application(), JobType.deploymentTo(job.type().zone())); UpgradeRollout rollout = application.deploymentSpec().requireInstance(job.application().instance()).upgradeRollout(); if (job.type().isTest()) { Optional<Instant> platformDeployedAt = jobSteps.get(deployment).completedAt(change.withoutApplication(), Optional.of(deployment)); Optional<Instant> revisionDeployedAt = jobSteps.get(deployment).completedAt(change.withoutPlatform(), Optional.of(deployment)); if (platformDeployedAt.isEmpty() && revisionDeployedAt.isPresent()) return List.of(change.withoutPlatform(), change); if (platformDeployedAt.isPresent() && revisionDeployedAt.isEmpty()) { if (jobSteps.get(deployment).readyAt(change, Optional.of(deployment)) .map(ready -> ! now.isBefore(ready)).orElse(false)) { switch (rollout) { case separate: return hasFailures(jobSteps.get(deployment), jobSteps.get(job)) ? 
List.of(change) : List.of(change.withoutApplication(), change); case leading: return List.of(change); case simultaneous: return List.of(change.withoutPlatform(), change); } } return List.of(change.withoutApplication(), change); } } Optional<Instant> platformReadyAt = step.dependenciesCompletedAt(change.withoutApplication(), Optional.of(job)); Optional<Instant> revisionReadyAt = step.dependenciesCompletedAt(change.withoutPlatform(), Optional.of(job)); if (platformReadyAt.isEmpty() && revisionReadyAt.isEmpty()) { switch (rollout) { case separate: return List.of(change.withoutApplication(), change); case leading: return List.of(change); case simultaneous: return List.of(change.withoutPlatform(), change); } } if (platformReadyAt.isEmpty()) return List.of(change.withoutPlatform(), change); if (revisionReadyAt.isEmpty()) { return List.of(change.withoutApplication(), change); } boolean platformReadyFirst = platformReadyAt.get().isBefore(revisionReadyAt.get()); boolean revisionReadyFirst = revisionReadyAt.get().isBefore(platformReadyAt.get()); boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type())) .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), systemVersion)) .isEmpty(); switch (rollout) { case separate: return (platformReadyFirst || platformReadyAt.get().equals(Instant.EPOCH)) ? step.job().flatMap(jobs()::get).flatMap(JobStatus::firstFailing).isPresent() || failingUpgradeOnlyTests ? List.of(change) : List.of(change.withoutApplication(), change) : revisionReadyFirst ? List.of(change.withoutPlatform(), change) : List.of(change); case leading: return List.of(change); case simultaneous: return platformReadyFirst ? List.of(change) : List.of(change.withoutPlatform(), change); default: throw new IllegalStateException("Unknown upgrade rollout policy"); } } /** The test jobs that need to run prior to the given production deployment jobs. 
*/ public Map<JobId, List<Job>> testJobs(Map<JobId, List<Job>> jobs) { Map<JobId, List<Job>> testJobs = new LinkedHashMap<>(); jobs.forEach((job, versionsList) -> { for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) { if (job.type().isProduction() && job.type().isDeployment()) { declaredTest(job.application(), testType).ifPresent(testJob -> { for (Job productionJob : versionsList) if (allJobs.successOn(testType, productionJob.versions()).asList().isEmpty()) testJobs.merge(testJob, List.of(new Job(testJob.type(), productionJob.versions(), jobSteps().get(testJob).readyAt(productionJob.change), productionJob.change)), DeploymentStatus::union); }); } } }); jobs.forEach((job, versionsList) -> { for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) { for (Job productionJob : versionsList) if ( job.type().isProduction() && job.type().isDeployment() && allJobs.successOn(testType, productionJob.versions()).asList().isEmpty() && testJobs.keySet().stream() .noneMatch(test -> test.type().equals(testType) && test.type().zone().equals(testType.zone()) && testJobs.get(test).stream().anyMatch(testJob -> testJob.versions().equals(productionJob.versions())))) { JobId testJob = firstDeclaredOrElseImplicitTest(testType); testJobs.merge(testJob, List.of(new Job(testJob.type(), productionJob.versions(), jobSteps.get(testJob).readyAt(productionJob.change), productionJob.change)), DeploymentStatus::union); } } }); return Collections.unmodifiableMap(testJobs); } private JobId firstDeclaredOrElseImplicitTest(JobType testJob) { return application.deploymentSpec().instanceNames().stream() .map(name -> new JobId(application.id().instance(name), testJob)) .filter(jobSteps::containsKey) .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow(); } /** JobId of any declared test of the given type, for the given instance. 
*/ private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) { JobId jobId = new JobId(instanceId, testJob); return jobSteps.containsKey(jobId) && jobSteps.get(jobId).isDeclared() ? Optional.of(jobId) : Optional.empty(); } /** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. */ private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps, Function<JobId, JobStatus> jobs) { if (DeploymentSpec.empty.equals(spec)) return Map.of(); Map<JobId, StepStatus> dependencies = new LinkedHashMap<>(); List<StepStatus> previous = List.of(); for (DeploymentSpec.Step step : spec.steps()) previous = fillStep(dependencies, allSteps, step, previous, null, jobs, instanceWithImplicitTest(test, spec), instanceWithImplicitTest(staging, spec)); return Collections.unmodifiableMap(dependencies); } private static InstanceName instanceWithImplicitTest(Environment environment, DeploymentSpec spec) { InstanceName first = null; for (DeploymentInstanceSpec step : spec.instances()) { if (step.concerns(environment)) return null; first = first != null ? first : step.name(); } return first; } /** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */ private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step, List<StepStatus> previous, InstanceName instance, Function<JobId, JobStatus> jobs, InstanceName implicitSystemTest, InstanceName implicitStagingTest) { if (step.steps().isEmpty() && ! (step instanceof DeploymentInstanceSpec)) { if (instance == null) return previous; if ( ! 
step.delay().isZero()) { StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance); allSteps.add(stepStatus); return List.of(stepStatus); } JobType jobType; JobId jobId; StepStatus stepStatus; if (step.concerns(test) || step.concerns(staging)) { jobType = step.concerns(test) ? systemTest(null) : stagingTest(null); jobId = new JobId(application.id().instance(instance), jobType); stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, jobs.apply(jobId), true); previous = new ArrayList<>(previous); previous.add(stepStatus); } else if (step.isTest()) { jobType = JobType.test(((DeclaredTest) step).region()); jobId = new JobId(application.id().instance(instance), jobType); stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, jobs.apply(jobId)); previous = List.of(stepStatus); } else if (step.concerns(prod)) { jobType = JobType.prod(((DeclaredZone) step).region().get()); jobId = new JobId(application.id().instance(instance), jobType); stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, jobs.apply(jobId)); previous = List.of(stepStatus); } else return previous; allSteps.add(stepStatus); dependencies.put(jobId, stepStatus); return previous; } if (step instanceof DeploymentInstanceSpec) { DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step); StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this); instance = spec.name(); allSteps.add(instanceStatus); previous = List.of(instanceStatus); if (instance.equals(implicitSystemTest)) { JobId job = new JobId(application.id().instance(instance), systemTest(null)); JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test), List.of(), this, jobs.apply(job), false); dependencies.put(job, testStatus); allSteps.add(testStatus); } if (instance.equals(implicitStagingTest)) { JobId job = new JobId(application.id().instance(instance), 
stagingTest(null)); JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(staging), List.of(), this, jobs.apply(job), false); dependencies.put(job, testStatus); allSteps.add(testStatus); } } if (step.isOrdered()) { for (DeploymentSpec.Step nested : step.steps()) previous = fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest); return previous; } List<StepStatus> parallel = new ArrayList<>(); for (DeploymentSpec.Step nested : step.steps()) parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest)); return List.copyOf(parallel); } public enum StepType { /** An instance — completion marks a change as ready for the jobs contained in it. */ instance, /** A timed delay. */ delay, /** A system, staging or production test. */ test, /** A production deployment. */ deployment, } /** * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change. * * Each node contains a step describing the node, * a list of steps which need to be complete before the step may start, * a list of jobs from which completion of the step is computed, and * optionally, an instance name used to identify a job type for the step, * * The completion criterion for each type of step is implemented in subclasses of this. */ public static abstract class StepStatus { private final StepType type; private final DeploymentSpec.Step step; private final List<StepStatus> dependencies; private final InstanceName instance; private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) { this.type = requireNonNull(type); this.step = requireNonNull(step); this.dependencies = List.copyOf(dependencies); this.instance = instance; } /** The type of step this is. */ public final StepType type() { return type; } /** The step defining this. 
*/ public final DeploymentSpec.Step step() { return step; } /** The list of steps that need to be complete before this may start. */ public final List<StepStatus> dependencies() { return dependencies; } /** The instance of this. */ public final InstanceName instance() { return instance; } /** The id of the job this corresponds to, if any. */ public Optional<JobId> job() { return Optional.empty(); } /** The time at which this is, or was, complete on the given change and / or versions. */ public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); } /** The time at which this is, or was, complete on the given change and / or versions. */ abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent); /** The time at which this step is ready to run the specified change and / or versions. */ public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); } /** The time at which this step is ready to run the specified change and / or versions. */ Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return dependenciesCompletedAt(change, dependent) .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change)) .flatMap(Optional::stream) .reduce(ready, maxBy(naturalOrder()))); } /** The time at which all dependencies completed on the given change and / or versions. */ Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) { Instant latest = Instant.EPOCH; for (StepStatus step : dependencies) { Optional<Instant> completedAt = step.completedAt(change, dependent); if (completedAt.isEmpty()) return Optional.empty(); latest = latest.isBefore(completedAt.get()) ? completedAt.get() : latest; } return Optional.of(latest); } /** The time until which this step is blocked by a change blocker. 
*/ public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); } /** The time until which this step is paused by user intervention. */ public Optional<Instant> pausedUntil() { return Optional.empty(); } /** The time until which this step is cooling down, due to consecutive failures. */ public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); } /** Whether this step is declared in the deployment spec, or is an implicit step. */ public boolean isDeclared() { return true; } } private static class DelayStatus extends StepStatus { private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) { super(StepType.delay, step, dependencies, instance); } @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return readyAt(change, dependent).map(completion -> completion.plus(step().delay())); } } private static class InstanceStatus extends StepStatus { private final DeploymentInstanceSpec spec; private final Instant now; private final Instance instance; private final DeploymentStatus status; private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now, Instance instance, DeploymentStatus status) { super(StepType.instance, spec, dependencies, spec.name()); this.spec = spec; this.now = now; this.instance = instance; this.status = status; } /** The time at which this step is ready to run the specified change and / or versions. */ @Override public Optional<Instant> readyAt(Change change) { return status.jobSteps.keySet().stream() .filter(job -> job.type().isProduction() && job.application().instance().equals(instance.name())) .map(job -> super.readyAt(change, Optional.of(job))) .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().isBefore(o.get()) ? 
n : o) .orElseGet(() -> super.readyAt(change, Optional.empty())); } /** * Time of completion of its dependencies, if all parts of the given change are contained in the change * for this instance, or if no more jobs should run for this instance for the given change. */ @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return ( (change.platform().isEmpty() || change.platform().equals(instance.change().platform())) && (change.revision().isEmpty() || change.revision().equals(instance.change().revision())) || step().steps().stream().noneMatch(step -> step.concerns(prod))) ? dependenciesCompletedAt(change, dependent).or(() -> Optional.of(Instant.EPOCH).filter(__ -> change.hasTargets())) : Optional.empty(); } @Override public Optional<Instant> blockedUntil(Change change) { for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) { boolean blocked = false; for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) { while ( blocker.window().includes(current) && now.plus(Duration.ofDays(7)).isAfter(current) && ( change.platform().isPresent() && blocker.blocksVersions() || change.revision().isPresent() && blocker.blocksRevisions())) { blocked = true; current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS); } } if ( ! blocked) return current == now ? 
Optional.empty() : Optional.of(current);
        }
        // Blocked for the whole scanned week: report a far-future instant instead.
        return Optional.of(now.plusSeconds(1 << 30));
    }

}

/** Common status logic for steps which correspond to a job. */
private static abstract class JobStepStatus extends StepStatus {

    private final JobStatus job;
    private final DeploymentStatus status;

    private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job,
                          DeploymentStatus status) {
        super(type, step, dependencies, job.id().application().instance());
        this.job = requireNonNull(job);
        this.status = requireNonNull(status);
    }

    @Override
    public Optional<JobId> job() { return Optional.of(job.id()); }

    @Override
    public Optional<Instant> pausedUntil() {
        return status.application().require(job.id().application().instance()).jobPause(job.id().type());
    }

    @Override
    public Optional<Instant> coolingDownUntil(Change change) {
        // Only cool down when the job has an ended failure streak on exactly the versions of the given change.
        if (job.lastTriggered().isEmpty()) return Optional.empty();
        if (job.lastCompleted().isEmpty()) return Optional.empty();
        if (job.firstFailing().isEmpty() || ! job.firstFailing().get().hasEnded()) return Optional.empty();
        Versions lastVersions = job.lastCompleted().get().versions();
        if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty();
        if (change.revision().isPresent() && ! change.revision().get().equals(lastVersions.targetRevision())) return Optional.empty();
        if (job.id().type().environment().isTest() && job.isNodeAllocationFailure()) return Optional.empty();

        Instant firstFailing = job.firstFailing().get().end().get();
        Instant lastCompleted = job.lastCompleted().get().end().get();
        // Cool-down grows with the failure streak: 10 minutes plus half the time between
        // the first and the most recent failure; empty once that instant has passed.
        return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted)
                                                  : Optional.of(lastCompleted.plus(Duration.ofMinutes(10))
                                                                             .plus(Duration.between(firstFailing, lastCompleted)
                                                                                           .dividedBy(2)))
                                                    .filter(status.now::isBefore);
    }

    /** Status of a production deployment job. */
    private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                        DeploymentStatus status, JobStatus job) {
        ZoneId zone = ZoneId.from(step.environment(), step.region().get());
        Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(job.id().application().instance())
                                                                            .deployments().get(zone));
        return new JobStepStatus(StepType.deployment, step, dependencies, job, status) {
            @Override
            public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
                // Ready when dependencies are done AND the versions are verified (tested, or triggered).
                Optional<Instant> readyAt = super.readyAt(change, dependent);
                Optional<Instant> testedAt = status.verifiedAt(job.id(), Versions.from(change, status.application, existingDeployment, status.fallbackPlatform(change, job.id())));
                if (readyAt.isEmpty() || testedAt.isEmpty()) return Optional.empty();
                return readyAt.get().isAfter(testedAt.get()) ? readyAt : testedAt;
            }

            /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */
            @Override
            Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                if (     change.isPinned()
                    &&   change.platform().isPresent()
                    && ! existingDeployment.map(Deployment::version).equals(change.platform()))
                    return Optional.empty();

                if (     change.revision().isPresent()
                    && ! existingDeployment.map(Deployment::revision).equals(change.revision())
                    &&   dependent.equals(job()))
                    return Optional.empty();

                Change fullChange = status.application().require(job.id().application().instance()).change();
                if (existingDeployment.map(deployment ->    ! (change.upgrades(deployment.version()) || change.upgrades(deployment.revision()))
                                                         &&   (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.revision())))
                                      .orElse(false))
                    return job.lastCompleted().flatMap(Run::end);

                // Otherwise: end of the newest run which successfully deployed the change's targets,
                // scanning no further back than the first non-matching run when we are our own dependent.
                Optional<Instant> end = Optional.empty();
                for (Run run : job.runs().descendingMap().values()) {
                    if (run.versions().targetsMatch(change)) {
                        if (run.hasSucceeded()) end = run.end();
                    }
                    else if (dependent.equals(job()))
                        break;
                }
                return end;
            }
        };
    }

    /** Status of a production test job: gated on the corresponding production deployment. */
    private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies,
                                                  DeploymentStatus status, JobStatus job) {
        JobId prodId = new JobId(job.id().application(), JobType.deploymentTo(job.id().type().zone()));
        return new JobStepStatus(StepType.test, step, dependencies, job, status) {
            @Override
            Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
                // Ready when both the dependencies and the production deployment in the same zone are done.
                Optional<Instant> readyAt = super.readyAt(change, dependent);
                Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId));
                if (readyAt.isEmpty() || deployedAt.isEmpty()) return Optional.empty();
                return readyAt.get().isAfter(deployedAt.get()) ? readyAt : deployedAt;
            }

            @Override
            Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId));
                // When we are our own dependent, only a run started after the deployment counts.
                return (dependent.equals(job()) ? job.lastTriggered().filter(run -> deployedAt.map(at -> ! run.start().isBefore(at)).orElse(false)).stream()
                                                : job.runs().values().stream())
                        .filter(Run::hasSucceeded)
                        .filter(run -> run.versions().targetsMatch(change))
                        .flatMap(run -> run.end().stream()).findFirst();
            }
        };
    }

    /** Status of a system or staging test job; {@code declared} is false for implicitly added tests. */
    private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                  DeploymentStatus status, JobStatus job, boolean declared) {
        return new JobStepStatus(StepType.test, step, dependencies, job, status) {
            @Override
            Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                // End of the latest successful run on matching versions, restricted to runs in the
                // dependent's cloud when a dependent is given.
                return RunList.from(job)
                              .matching(run -> dependent.flatMap(status::deploymentFor)
                                                        .map(deployment -> run.versions().targetsMatch(Versions.from(change,
                                                                                                                     status.application,
                                                                                                                     Optional.of(deployment),
                                                                                                                     status.fallbackPlatform(change, dependent.get()))))
                                                        .orElseGet(() ->    (change.platform().isEmpty() || change.platform().get().equals(run.versions().targetPlatform()))
                                                                         && (change.revision().isEmpty() || change.revision().get().equals(run.versions().targetRevision()))))
                              .matching(Run::hasSucceeded)
                              .matching(run -> dependent.isEmpty() || status.findCloud(dependent.get().type()).equals(status.findCloud(run.id().type())))
                              .asList().stream()
                              .map(run -> run.end().get())
                              .max(naturalOrder());
            }

            @Override
            public boolean isDeclared() { return declared; }
        };
    }

}

/** A planned run of a job: its type, target versions, readiness, and the change it realises. */
public static class Job {

    private final JobType type;
    private final Versions versions;
    private final Optional<Instant> readyAt;
    private final Change change;

    public Job(JobType type, Versions versions, Optional<Instant> readyAt, Change change) {
        this.type = type;
        // System tests don't use source information; drop it.
        this.versions = type.isSystemTest() ?
versions.withoutSources() : versions; this.readyAt = readyAt; this.change = change; } public JobType type() { return type; } public Versions versions() { return versions; } public Optional<Instant> readyAt() { return readyAt; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Job job = (Job) o; return type.zone().equals(job.type.zone()) && versions.equals(job.versions) && readyAt.equals(job.readyAt) && change.equals(job.change); } @Override public int hashCode() { return Objects.hash(type.zone(), versions, readyAt, change); } @Override public String toString() { return change + " with versions " + versions + ", ready at " + readyAt; } } }
/**
 * Status of the deployment jobs of an application, derived from its deployment spec:
 * a DAG of step statuses, and queries over the job statuses they aggregate.
 */
class DeploymentStatus {

    /** Concatenates the two lists, without duplicates, preserving order. */
    private static <T> List<T> union(List<T> first, List<T> second) {
        return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList());
    }

    private final Application application;
    private final JobList allJobs;
    private final VersionStatus versionStatus;
    private final Version systemVersion;
    private final Function<InstanceName, VersionCompatibility> versionCompatibility;
    private final ZoneRegistry zones;
    private final Instant now;
    private final Map<JobId, StepStatus> jobSteps;
    private final List<StepStatus> allSteps;

    public DeploymentStatus(Application application, Function<JobId, JobStatus> allJobs, ZoneRegistry zones,
                            VersionStatus versionStatus, Version systemVersion,
                            Function<InstanceName, VersionCompatibility> versionCompatibility, Instant now) {
        this.application = requireNonNull(application);
        this.zones = zones;
        this.versionStatus = requireNonNull(versionStatus);
        this.systemVersion = requireNonNull(systemVersion);
        this.versionCompatibility = versionCompatibility;
        this.now = requireNonNull(now);
        List<StepStatus> allSteps = new ArrayList<>();
        Map<JobId, JobStatus> jobs = new HashMap<>();
        // Build the step DAG from the deployment spec, memoizing job status lookups.
        this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps, job -> jobs.computeIfAbsent(job, allJobs));
        this.allSteps = Collections.unmodifiableList(allSteps);
        this.allJobs = JobList.from(jobSteps.keySet().stream().map(allJobs).collect(toList()));
    }

    private JobType systemTest(JobType dependent) {
        return JobType.systemTest(zones, dependent == null ? null : findCloud(dependent));
    }

    private JobType stagingTest(JobType dependent) {
        return JobType.stagingTest(zones, dependent == null ? null : findCloud(dependent));
    }

    /** The application this deployment status concerns. */
    public Application application() { return application; }

    /** A filterable list of the status of all jobs for this application. */
    public JobList jobs() { return allJobs; }

    /** Whether any jobs both dependent on the dependency, and a dependency for the dependent, are failing. */
    private boolean hasFailures(StepStatus dependency, StepStatus dependent) {
        Set<StepStatus> dependents = new HashSet<>();
        fillDependents(dependency, new HashSet<>(), dependents, dependent);
        Set<JobId> criticalJobs = dependents.stream().flatMap(step -> step.job().stream()).collect(toSet());

        return ! allJobs.matching(job -> criticalJobs.contains(job.id()))
                        .failingHard()
                        .isEmpty();
    }

    // Depth-first search collecting all steps on some dependency path from dependent back to dependency.
    private boolean fillDependents(StepStatus dependency, Set<StepStatus> visited, Set<StepStatus> dependents, StepStatus current) {
        if (visited.contains(current))
            return dependents.contains(current);

        if (dependency == current)
            dependents.add(current);
        else
            for (StepStatus dep : current.dependencies)
                if (fillDependents(dependency, visited, dependents, dep))
                    dependents.add(current);

        visited.add(current);
        return dependents.contains(current);
    }

    /** Whether any job is failing on versions selected by the given filter, with errors other than lack of capacity in a test zone.. */
    public boolean hasFailures(Predicate<RevisionId> revisionFilter) {
        return ! allJobs.failingHard()
                        .matching(job -> revisionFilter.test(job.lastTriggered().get().versions().targetRevision()))
                        .isEmpty();
    }

    /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */
    public boolean hasFailures() {
        return ! allJobs.failingHard().isEmpty();
    }

    /** All job statuses, by job type, for the given instance. */
    public Map<JobType, JobStatus> instanceJobs(InstanceName instance) {
        return allJobs.asList().stream()
                      .filter(job -> job.id().application().equals(application.id().instance(instance)))
                      .collect(CustomCollectors.toLinkedMap(job -> job.id().type(), Function.identity()));
    }

    /** Filterable job status lists for each instance of this application.
*/
    public Map<ApplicationId, JobList> instanceJobs() {
        return allJobs.groupingBy(job -> job.id().application());
    }

    /**
     * The set of jobs that need to run for the changes of each instance of the application to be considered complete,
     * and any test jobs for any outstanding change, which will likely be needed to later deploy this change.
     */
    public Map<JobId, List<Job>> jobsToRun() {
        if (application.revisions().last().isEmpty()) return Map.of();

        Map<InstanceName, Change> changes = new LinkedHashMap<>();
        for (InstanceName instance : application.deploymentSpec().instanceNames())
            changes.put(instance, application.require(instance).change());
        Map<JobId, List<Job>> jobs = jobsToRun(changes);

        // Add non-production jobs for any outstanding change, as these are likely needed later.
        Map<InstanceName, Change> outstandingChanges = new LinkedHashMap<>();
        for (InstanceName instance : application.deploymentSpec().instanceNames()) {
            Change outstanding = outstandingChange(instance);
            if (outstanding.hasTargets())
                outstandingChanges.put(instance, outstanding.onTopOf(application.require(instance).change()));
        }
        var testJobs = jobsToRun(outstandingChanges, true).entrySet().stream()
                                                          .filter(entry -> ! entry.getKey().type().isProduction());

        return Stream.concat(jobs.entrySet().stream(), testJobs)
                     .collect(collectingAndThen(toMap(Map.Entry::getKey, Map.Entry::getValue, DeploymentStatus::union, LinkedHashMap::new),
                                                Collections::unmodifiableMap));
    }

    private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) {
        if (application.revisions().last().isEmpty()) return Map.of();

        Map<JobId, List<Job>> productionJobs = new LinkedHashMap<>();
        changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests)));
        Map<JobId, List<Job>> testJobs = testJobs(productionJobs);
        Map<JobId, List<Job>> jobs = new LinkedHashMap<>(testJobs);
        jobs.putAll(productionJobs);
        // Add runs for declared, idle, non-production jobs not yet covered above.
        jobSteps.forEach((job, step) -> {
            if ( ! step.isDeclared() || job.type().isProduction() || jobs.containsKey(job))
                return;

            Change change = changes.get(job.application().instance());
            if (change == null || ! change.hasTargets())
                return;

            // Pick the first production deployment job per cloud to derive versions from, when any exists.
            Collection<Optional<JobId>> firstProductionJobsWithDeployment = jobSteps.keySet().stream()
                                                                                    .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment())
                                                                                    .filter(jobId -> deploymentFor(jobId).isPresent())
                                                                                    .collect(groupingBy(jobId -> findCloud(jobId.type()),
                                                                                                        Collectors.reducing((o, n) -> o)))
                                                                                    .values();
            if (firstProductionJobsWithDeployment.isEmpty())
                firstProductionJobsWithDeployment = List.of(Optional.empty());

            for (Optional<JobId> firstProductionJobWithDeploymentInCloud : firstProductionJobsWithDeployment) {
                Versions versions = Versions.from(change, application, firstProductionJobWithDeploymentInCloud.flatMap(this::deploymentFor), fallbackPlatform(change, job));
                if (step.completedAt(change, firstProductionJobWithDeploymentInCloud).isEmpty()) {
                    JobType actualType = job.type().isSystemTest() ? systemTest(firstProductionJobWithDeploymentInCloud.map(JobId::type).orElse(null))
                                                                   : stagingTest(firstProductionJobWithDeploymentInCloud.map(JobId::type).orElse(null));
                    jobs.merge(job, List.of(new Job(actualType, versions, step.readyAt(change), change)), DeploymentStatus::union);
                }
            }
        });
        return Collections.unmodifiableMap(jobs);
    }

    /** Fall back to the newest, deployable platform, which is compatible with what we want to deploy.
*/
    public Version fallbackPlatform(Change change, JobId job) {
        Optional<Version> compileVersion = change.revision().map(application.revisions()::get).flatMap(ApplicationVersion::compileVersion);
        if (compileVersion.isEmpty())
            return systemVersion;
        // Newest deployable platform accepted by the compatibility rules for this compile version.
        for (VespaVersion version : reversed(versionStatus.deployableVersions()))
            if (versionCompatibility.apply(job.application().instance()).accept(version.versionNumber(), compileVersion.get()))
                return version.versionNumber();

        throw new IllegalArgumentException("no legal platform version exists in this system for compile version " + compileVersion.get());
    }

    /** The set of jobs that need to run for the given changes to be considered complete. */
    public boolean hasCompleted(InstanceName instance, Change change) {
        // Instances without production steps are complete also when the change is older than what's already tested.
        if ( ! application.deploymentSpec().requireInstance(instance).concerns(prod)) {
            if (newestTested(instance, run -> run.versions().targetRevision()).map(change::downgrades).orElse(false)) return true;
            if (newestTested(instance, run -> run.versions().targetPlatform()).map(change::downgrades).orElse(false)) return true;
        }

        return jobsToRun(Map.of(instance, change), false).isEmpty();
    }

    /** The set of jobs that need to run for the given changes to be considered complete. */
    private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes) {
        return jobsToRun(changes, false);
    }

    /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. */
    public Map<JobId, StepStatus> jobSteps() { return jobSteps; }

    public Map<InstanceName, StepStatus> instanceSteps() {
        ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder();
        for (StepStatus status : allSteps)
            if (status instanceof InstanceStatus)
                instances.put(status.instance(), status);
        return instances.build();
    }

    /** The step status for all relevant steps in the deployment spec of this, in the same order as in the deployment spec. */
    public List<StepStatus> allSteps() { return allSteps; }

    public Optional<Deployment> deploymentFor(JobId job) {
        return Optional.ofNullable(application.require(job.application().instance())
                                              .deployments().get(job.type().zone()));
    }

    // Oldest of the newest successful results per relevant test zone, by the given run mapper,
    // or empty unless all relevant test zones have results.
    private <T extends Comparable<T>> Optional<T> newestTested(InstanceName instance, Function<Run, T> runMapper) {
        Set<CloudName> clouds = jobSteps.keySet().stream()
                                        .filter(job -> job.type().isProduction())
                                        .map(job -> findCloud(job.type()))
                                        .collect(toSet());
        List<ZoneId> testZones = new ArrayList<>();
        if (application.deploymentSpec().requireInstance(instance).concerns(test)) {
            if (clouds.isEmpty()) testZones.add(JobType.systemTest(zones, null).zone());
            else for (CloudName cloud: clouds) testZones.add(JobType.systemTest(zones, cloud).zone());
        }
        if (application.deploymentSpec().requireInstance(instance).concerns(staging)) {
            if (clouds.isEmpty()) testZones.add(JobType.stagingTest(zones, null).zone());
            else for (CloudName cloud: clouds) testZones.add(JobType.stagingTest(zones, cloud).zone());
        }

        Map<ZoneId, Optional<T>> newestPerZone = instanceJobs().get(application.id().instance(instance))
                                                               .type(systemTest(null), stagingTest(null))
                                                               .asList().stream().flatMap(jobs -> jobs.runs().values().stream())
                                                               .filter(Run::hasSucceeded)
                                                               .collect(groupingBy(run -> run.id().type().zone(),
                                                                                   mapping(runMapper, Collectors.maxBy(naturalOrder()))));
        return newestPerZone.keySet().containsAll(testZones)
               ? testZones.stream().map(newestPerZone::get)
                          .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().compareTo(o.get()) < 0 ? n : o)
                          .orElse(Optional.empty())
               : Optional.empty();
    }

    /**
     * The change to a revision which all dependencies of the given instance has completed,
     * which does not downgrade any deployments in the instance,
     * which is not already rolling out to the instance, and
     * which causes at least one job to run if deployed to the instance.
     * For the "exclusive" revision upgrade policy it is the oldest such revision; otherwise, it is the latest.
     */
    public Change outstandingChange(InstanceName instance) {
        StepStatus status = instanceSteps().get(instance);
        if (status == null) return Change.empty();
        DeploymentInstanceSpec spec = application.deploymentSpec().requireInstance(instance);
        boolean ascending = next == spec.revisionTarget();
        int cumulativeRisk = 0;
        int nextRisk = 0;
        int skippedCumulativeRisk = 0;
        Instant readySince = now;

        Optional<RevisionId> newestRevision = application.productionDeployments()
                                                         .getOrDefault(instance, List.of()).stream()
                                                         .map(Deployment::revision).max(naturalOrder());
        Change candidate = Change.empty();
        for (ApplicationVersion version : application.revisions().deployable(ascending)) {
            // Skip revisions which are downgrades, not upgrades of the current change, or already complete.
            Change change = Change.of(version.id());
            if (   newestRevision.isPresent() && change.downgrades(newestRevision.get())
                || ! application.require(instance).change().revision().map(change::upgrades).orElse(true)
                || hasCompleted(instance, change)) {
                if (ascending) continue;
                else return Change.empty();
            }

            // This revision contains something new, so aggregate its risk.
            skippedCumulativeRisk += version.risk();
            nextRisk = nextRisk > 0 ? nextRisk : version.risk();
            // Not yet ready to roll out: keep looking.
            Optional<Instant> readyAt = status.dependenciesCompletedAt(Change.of(version.id()), Optional.empty());
            if (readyAt.map(now::isBefore).orElse(true)) continue;

            cumulativeRisk += skippedCumulativeRisk;
            skippedCumulativeRisk = 0;
            nextRisk = 0;
            if (cumulativeRisk >= spec.maxRisk())
                return candidate.equals(Change.empty()) ? change : candidate;

            if (readyAt.get().isBefore(readySince)) readySince = readyAt.get();
            candidate = change;
        }
        // Roll out the candidate when no jobs have run yet, enough risk has accumulated,
        // the next revision would exceed the risk budget, or the candidate has idled long enough.
        return    instanceJobs(instance).values().stream().allMatch(jobs -> jobs.lastTriggered().isEmpty())
               || cumulativeRisk >= spec.minRisk()
               || cumulativeRisk + nextRisk > spec.maxRisk()
               || ! now.isBefore(readySince.plus(Duration.ofHours(spec.maxIdleHours())))
               ?
candidate : Change.empty();
    }

    /** Earliest instant when job was triggered with given versions, or both system and staging tests were successful. */
    public Optional<Instant> verifiedAt(JobId job, Versions versions) {
        Optional<Instant> triggeredAt = allJobs.get(job)
                                               .flatMap(status -> status.runs().values().stream()
                                                                        .filter(run -> run.versions().equals(versions))
                                                                        .findFirst())
                                               .map(Run::start);
        Optional<Instant> systemTestedAt = testedAt(job.application(), systemTest(null), versions);
        Optional<Instant> stagingTestedAt = testedAt(job.application(), stagingTest(null), versions);
        if (systemTestedAt.isEmpty() || stagingTestedAt.isEmpty()) return triggeredAt;
        Optional<Instant> testedAt = systemTestedAt.get().isAfter(stagingTestedAt.get()) ? systemTestedAt : stagingTestedAt;
        return triggeredAt.isPresent() && triggeredAt.get().isBefore(testedAt.get()) ? triggeredAt : testedAt;
    }

    /** Earliest instant when versions were tested for the given instance */
    private Optional<Instant> testedAt(ApplicationId instance, JobType type, Versions versions) {
        // A declared test restricts the search to the given instance; otherwise any instance's test counts.
        return declaredTest(instance, type).map(__ -> allJobs.instance(instance.instance()))
                                           .orElse(allJobs)
                                           .type(type).asList().stream()
                                           .flatMap(status -> RunList.from(status)
                                                                     .on(versions)
                                                                     .matching(run -> run.id().type().zone().equals(type.zone()))
                                                                     .matching(Run::hasSucceeded)
                                                                     .asList().stream()
                                                                     .map(Run::start))
                                           .min(naturalOrder());
    }

    // The production jobs of the given instance which still need to run for the given change,
    // with the versions, possibly split in two partial changes, each job should run on.
    private Map<JobId, List<Job>> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) {
        Map<JobId, List<Job>> jobs = new LinkedHashMap<>();
        jobSteps.forEach((job, step) -> {
            if ( ! job.application().instance().equals(instance) || ! job.type().isProduction())
                return;

            // Depending on the job itself signals the strict completion criterion.
            if (step.completedAt(change, Optional.of(job)).isPresent())
                return;

            Optional<Deployment> deployment = deploymentFor(job);
            Optional<Version> existingPlatform = deployment.map(Deployment::version);
            Optional<RevisionId> existingRevision = deployment.map(Deployment::revision);
            boolean deployingCompatibilityChange =    areIncompatible(existingPlatform, change.revision(), job)
                                                   || areIncompatible(change.platform(), existingRevision, job);
            // When computing eager test jobs for outstanding changes, assume the current change completes first.
            if (assumeUpgradesSucceed) {
                if (deployingCompatibilityChange) // No eager tests for these; the compatibility change must go first.
                    return;

                Change currentChange = application.require(instance).change();
                Versions target = Versions.from(currentChange, application, deployment, fallbackPlatform(currentChange, job));
                existingPlatform = Optional.of(target.targetPlatform());
                existingRevision = Optional.of(target.targetRevision());
            }
            List<Job> toRun = new ArrayList<>();
            List<Change> changes = deployingCompatibilityChange ? List.of(change) : changes(job, step, change);
            for (Change partial : changes) {
                Job jobToRun = new Job(job.type(), Versions.from(partial, application, existingPlatform, existingRevision, fallbackPlatform(partial, job)),
                                       step.readyAt(partial, Optional.of(job)), partial);
                toRun.add(jobToRun);
                // The first partial change is assumed deployed before the second is computed.
                existingPlatform = Optional.of(jobToRun.versions.targetPlatform());
                existingRevision = Optional.of(jobToRun.versions.targetRevision());
            }
            jobs.put(job, toRun);
        });
        return jobs;
    }

    // Whether the given platform refuses the compile version of the given revision, per the compatibility rules.
    private boolean areIncompatible(Optional<Version> platform, Optional<RevisionId> revision, JobId job) {
        Optional<Version> compileVersion = revision.map(application.revisions()::get)
                                                   .flatMap(ApplicationVersion::compileVersion);
        return    platform.isPresent()
               && compileVersion.isPresent()
               && versionCompatibility.apply(job.application().instance()).refuse(platform.get(), compileVersion.get());
    }

    /** Changes to deploy with the given job, possibly split in two steps.
*/
    private List<Change> changes(JobId job, StepStatus step, Change change) {
        // Single-part changes, pinned changes, and changes already partially complete, run as one.
        if (change.platform().isEmpty() || change.revision().isEmpty() || change.isPinned())
            return List.of(change);

        if (   step.completedAt(change.withoutApplication(), Optional.of(job)).isPresent()
            || step.completedAt(change.withoutPlatform(), Optional.of(job)).isPresent())
            return List.of(change);

        JobId deployment = new JobId(job.application(), JobType.deploymentTo(job.type().zone()));
        UpgradeRollout rollout = application.deploymentSpec().requireInstance(job.application().instance()).upgradeRollout();
        if (job.type().isTest()) {
            Optional<Instant> platformDeployedAt = jobSteps.get(deployment).completedAt(change.withoutApplication(), Optional.of(deployment));
            Optional<Instant> revisionDeployedAt = jobSteps.get(deployment).completedAt(change.withoutPlatform(), Optional.of(deployment));

            // Only the revision has deployed: test that first.
            if (platformDeployedAt.isEmpty() && revisionDeployedAt.isPresent()) return List.of(change.withoutPlatform(), change);

            // Only the platform has deployed: order by rollout policy when the full change is ready for the deployment.
            if (platformDeployedAt.isPresent() && revisionDeployedAt.isEmpty()) {
                if (jobSteps.get(deployment).readyAt(change, Optional.of(deployment))
                            .map(ready -> ! now.isBefore(ready)).orElse(false)) {
                    switch (rollout) {
                        case separate: return hasFailures(jobSteps.get(deployment), jobSteps.get(job)) ? List.of(change)
                                                                                                       : List.of(change.withoutApplication(), change);
                        case leading: return List.of(change);
                        case simultaneous: return List.of(change.withoutPlatform(), change);
                    }
                }
                return List.of(change.withoutApplication(), change);
            }
        }

        Optional<Instant> platformReadyAt = step.dependenciesCompletedAt(change.withoutApplication(), Optional.of(job));
        Optional<Instant> revisionReadyAt = step.dependenciesCompletedAt(change.withoutPlatform(), Optional.of(job));

        // Neither part is ready: the rollout policy decides the split.
        if (platformReadyAt.isEmpty() && revisionReadyAt.isEmpty()) {
            switch (rollout) {
                case separate: return List.of(change.withoutApplication(), change);
                case leading: return List.of(change);
                case simultaneous: return List.of(change.withoutPlatform(), change);
            }
        }

        // Only the revision is ready: run that first.
        if (platformReadyAt.isEmpty()) return List.of(change.withoutPlatform(), change);

        // Only the platform is ready: run that first.
        if (revisionReadyAt.isEmpty()) {
            return List.of(change.withoutApplication(), change);
        }

        // Both parts are ready: order by readiness and rollout policy.
        boolean platformReadyFirst = platformReadyAt.get().isBefore(revisionReadyAt.get());
        boolean revisionReadyFirst = revisionReadyAt.get().isBefore(platformReadyAt.get());
        boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type()))
                                                  .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), systemVersion))
                                                  .isEmpty();
        switch (rollout) {
            case separate:
                // Whichever part rolled out first keeps rolling first, unless the platform alone is failing.
                return (platformReadyFirst || platformReadyAt.get().equals(Instant.EPOCH))
                       ? step.job().flatMap(jobs()::get).flatMap(JobStatus::firstFailing).isPresent() || failingUpgradeOnlyTests
                         ? List.of(change)
                         : List.of(change.withoutApplication(), change)
                       : revisionReadyFirst
                         ? List.of(change.withoutPlatform(), change)
                         : List.of(change);
            case leading:
                // The parts fuse and continue together.
                return List.of(change);
            case simultaneous:
                return platformReadyFirst ? List.of(change) : List.of(change.withoutPlatform(), change);
            default: throw new IllegalStateException("Unknown upgrade rollout policy");
        }
    }

    /** The test jobs that need to run prior to the given production deployment jobs.
*/
    public Map<JobId, List<Job>> testJobs(Map<JobId, List<Job>> jobs) {
        Map<JobId, List<Job>> testJobs = new LinkedHashMap<>();
        // First, add any declared tests in the same instance as each production deployment job.
        jobs.forEach((job, versionsList) -> {
            for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
                if (job.type().isProduction() && job.type().isDeployment()) {
                    declaredTest(job.application(), testType).ifPresent(testJob -> {
                        for (Job productionJob : versionsList)
                            if (allJobs.successOn(testType, productionJob.versions()).asList().isEmpty())
                                testJobs.merge(testJob, List.of(new Job(testJob.type(),
                                                                        productionJob.versions(),
                                                                        jobSteps().get(testJob).readyAt(productionJob.change),
                                                                        productionJob.change)),
                                               DeploymentStatus::union);
                    });
                }
            }
        });
        // Then, for versions not yet covered by any test above, add a declared-or-implicit test.
        jobs.forEach((job, versionsList) -> {
            for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
                for (Job productionJob : versionsList)
                    if (   job.type().isProduction() && job.type().isDeployment()
                        && allJobs.successOn(testType, productionJob.versions()).asList().isEmpty()
                        && testJobs.keySet().stream()
                                   .noneMatch(test ->    test.type().equals(testType) && test.type().zone().equals(testType.zone())
                                                      && testJobs.get(test).stream().anyMatch(testJob -> testJob.versions().equals(productionJob.versions())))) {
                        JobId testJob = firstDeclaredOrElseImplicitTest(testType);
                        testJobs.merge(testJob,
                                       List.of(new Job(testJob.type(),
                                                       productionJob.versions(),
                                                       jobSteps.get(testJob).readyAt(productionJob.change),
                                                       productionJob.change)),
                                       DeploymentStatus::union);
                    }
            }
        });
        return Collections.unmodifiableMap(testJobs);
    }

    // Prefers a declared test over an implicit one, across all instances which have the given job.
    private JobId firstDeclaredOrElseImplicitTest(JobType testJob) {
        return application.deploymentSpec().instanceNames().stream()
                          .map(name -> new JobId(application.id().instance(name), testJob))
                          .filter(jobSteps::containsKey)
                          .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow();
    }

    /** JobId of any declared test of the given type, for the given instance. */
    private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) {
        JobId jobId = new JobId(instanceId, testJob);
        return jobSteps.containsKey(jobId) && jobSteps.get(jobId).isDeclared() ? Optional.of(jobId)
                                                                               : Optional.empty();
    }

    /** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. */
    private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps, Function<JobId, JobStatus> jobs) {
        if (DeploymentSpec.empty.equals(spec))
            return Map.of();

        Map<JobId, StepStatus> dependencies = new LinkedHashMap<>();
        List<StepStatus> previous = List.of();
        for (DeploymentSpec.Step step : spec.steps())
            previous = fillStep(dependencies, allSteps, step, previous, null, jobs,
                                instanceWithImplicitTest(test, spec),
                                instanceWithImplicitTest(staging, spec));

        return Collections.unmodifiableMap(dependencies);
    }

    // The first instance, when no instance declares the given test environment; null otherwise.
    private static InstanceName instanceWithImplicitTest(Environment environment, DeploymentSpec spec) {
        InstanceName first = null;
        for (DeploymentInstanceSpec step : spec.instances()) {
            if (step.concerns(environment)) return null;
            first = first != null ? first : step.name();
        }
        return first;
    }

    /** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */
    private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step,
                                      List<StepStatus> previous, InstanceName instance, Function<JobId, JobStatus> jobs,
                                      InstanceName implicitSystemTest, InstanceName implicitStagingTest) {
        // Leaf steps (delays, zones, tests) become single nodes; container steps recurse below.
        if (step.steps().isEmpty() && ! (step instanceof DeploymentInstanceSpec)) {
            if (instance == null)
                return previous; // Ignore leaf steps outside of instances.
            if ( !
step.delay().isZero()) {
                StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance);
                allSteps.add(stepStatus);
                return List.of(stepStatus);
            }

            JobType jobType;
            JobId jobId;
            StepStatus stepStatus;
            if (step.concerns(test) || step.concerns(staging)) {
                jobType = step.concerns(test) ? systemTest(null) : stagingTest(null);
                jobId = new JobId(application.id().instance(instance), jobType);
                stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, jobs.apply(jobId), true);
                // Test steps are added as dependencies alongside, not instead of, the previous steps.
                previous = new ArrayList<>(previous);
                previous.add(stepStatus);
            }
            else if (step.isTest()) {
                jobType = JobType.test(((DeclaredTest) step).region());
                jobId = new JobId(application.id().instance(instance), jobType);
                stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, jobs.apply(jobId));
                previous = List.of(stepStatus);
            }
            else if (step.concerns(prod)) {
                jobType = JobType.prod(((DeclaredZone) step).region().get());
                jobId = new JobId(application.id().instance(instance), jobType);
                stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, jobs.apply(jobId));
                previous = List.of(stepStatus);
            }
            else return previous; // Any other leaf step is ignored.
            allSteps.add(stepStatus);
            dependencies.put(jobId, stepStatus);
            return previous;
        }

        if (step instanceof DeploymentInstanceSpec) {
            DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step);
            StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this);
            instance = spec.name();
            allSteps.add(instanceStatus);
            previous = List.of(instanceStatus);
            // Add implicit system and staging tests to this instance, when so designated.
            if (instance.equals(implicitSystemTest)) {
                JobId job = new JobId(application.id().instance(instance), systemTest(null));
                JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test), List.of(),
                                                                          this, jobs.apply(job), false);
                dependencies.put(job, testStatus);
                allSteps.add(testStatus);
            }
            if (instance.equals(implicitStagingTest)) {
                JobId job = new JobId(application.id().instance(instance), stagingTest(null));
                JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(staging), List.of(),
                                                                          this, jobs.apply(job), false);
                dependencies.put(job, testStatus);
                allSteps.add(testStatus);
            }
        }

        // Ordered container: each nested step depends on the one before it.
        if (step.isOrdered()) {
            for (DeploymentSpec.Step nested : step.steps())
                previous = fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest);

            return previous;
        }

        // Parallel container: all nested steps share the same dependencies, and are jointly depended on.
        List<StepStatus> parallel = new ArrayList<>();
        for (DeploymentSpec.Step nested : step.steps())
            parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest));

        return List.copyOf(parallel);
    }

    public enum StepType {

        /** An instance — completion marks a change as ready for the jobs contained in it. */
        instance,

        /** A timed delay. */
        delay,

        /** A system, staging or production test. */
        test,

        /** A production deployment. */
        deployment,

    }

    /**
     * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change.
     *
     * Each node contains a step describing the node,
     * a list of steps which need to be complete before the step may start,
     * a list of jobs from which completion of the step is computed, and
     * optionally, an instance name used to identify a job type for the step,
     *
     * The completion criterion for each type of step is implemented in subclasses of this.
     */
    public static abstract class StepStatus {

        private final StepType type;
        private final DeploymentSpec.Step step;
        private final List<StepStatus> dependencies;
        private final InstanceName instance;

        private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) {
            this.type = requireNonNull(type);
            this.step = requireNonNull(step);
            this.dependencies = List.copyOf(dependencies);
            this.instance = instance;
        }

        /** The type of step this is. */
        public final StepType type() { return type; }

        /** The step defining this. */
        public final DeploymentSpec.Step step() { return step; }

        /** The list of steps that need to be complete before this may start. */
        public final List<StepStatus> dependencies() { return dependencies; }

        /** The instance of this. */
        public final InstanceName instance() { return instance; }

        /** The id of the job this corresponds to, if any. */
        public Optional<JobId> job() { return Optional.empty(); }

        /** The time at which this is, or was, complete on the given change and / or versions. */
        public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); }

        /** The time at which this is, or was, complete on the given change and / or versions. */
        abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent);

        /** The time at which this step is ready to run the specified change and / or versions. */
        public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); }

        /** The time at which this step is ready to run the specified change and / or versions. */
        Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
            // Ready when dependencies are done, and neither blocked, paused, nor cooling down.
            return dependenciesCompletedAt(change, dependent)
                    .map(ready -> Stream.of(blockedUntil(change),
                                            pausedUntil(),
                                            coolingDownUntil(change))
                                        .flatMap(Optional::stream)
                                        .reduce(ready, maxBy(naturalOrder())));
        }

        /** The time at which all dependencies completed on the given change and / or versions. */
        Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) {
            Instant latest = Instant.EPOCH;
            for (StepStatus step : dependencies) {
                Optional<Instant> completedAt = step.completedAt(change, dependent);
                if (completedAt.isEmpty()) return Optional.empty();
                latest = latest.isBefore(completedAt.get()) ? completedAt.get() : latest;
            }
            return Optional.of(latest);
        }

        /** The time until which this step is blocked by a change blocker.
*/ public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); } /** The time until which this step is paused by user intervention. */ public Optional<Instant> pausedUntil() { return Optional.empty(); } /** The time until which this step is cooling down, due to consecutive failures. */ public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); } /** Whether this step is declared in the deployment spec, or is an implicit step. */ public boolean isDeclared() { return true; } } private static class DelayStatus extends StepStatus { private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) { super(StepType.delay, step, dependencies, instance); } @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return readyAt(change, dependent).map(completion -> completion.plus(step().delay())); } } private static class InstanceStatus extends StepStatus { private final DeploymentInstanceSpec spec; private final Instant now; private final Instance instance; private final DeploymentStatus status; private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now, Instance instance, DeploymentStatus status) { super(StepType.instance, spec, dependencies, spec.name()); this.spec = spec; this.now = now; this.instance = instance; this.status = status; } /** The time at which this step is ready to run the specified change and / or versions. */ @Override public Optional<Instant> readyAt(Change change) { return status.jobSteps.keySet().stream() .filter(job -> job.type().isProduction() && job.application().instance().equals(instance.name())) .map(job -> super.readyAt(change, Optional.of(job))) .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().isBefore(o.get()) ? 
n : o) .orElseGet(() -> super.readyAt(change, Optional.empty())); } /** * Time of completion of its dependencies, if all parts of the given change are contained in the change * for this instance, or if no more jobs should run for this instance for the given change. */ @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return ( (change.platform().isEmpty() || change.platform().equals(instance.change().platform())) && (change.revision().isEmpty() || change.revision().equals(instance.change().revision())) || step().steps().stream().noneMatch(step -> step.concerns(prod))) ? dependenciesCompletedAt(change, dependent).or(() -> Optional.of(Instant.EPOCH).filter(__ -> change.hasTargets())) : Optional.empty(); } @Override public Optional<Instant> blockedUntil(Change change) { for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) { boolean blocked = false; for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) { while ( blocker.window().includes(current) && now.plus(Duration.ofDays(7)).isAfter(current) && ( change.platform().isPresent() && blocker.blocksVersions() || change.revision().isPresent() && blocker.blocksRevisions())) { blocked = true; current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS); } } if ( ! blocked) return current == now ? 
Optional.empty() : Optional.of(current); } return Optional.of(now.plusSeconds(1 << 30)); } } private static abstract class JobStepStatus extends StepStatus { private final JobStatus job; private final DeploymentStatus status; private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job, DeploymentStatus status) { super(type, step, dependencies, job.id().application().instance()); this.job = requireNonNull(job); this.status = requireNonNull(status); } @Override public Optional<JobId> job() { return Optional.of(job.id()); } @Override public Optional<Instant> pausedUntil() { return status.application().require(job.id().application().instance()).jobPause(job.id().type()); } @Override public Optional<Instant> coolingDownUntil(Change change) { if (job.lastTriggered().isEmpty()) return Optional.empty(); if (job.lastCompleted().isEmpty()) return Optional.empty(); if (job.firstFailing().isEmpty() || ! job.firstFailing().get().hasEnded()) return Optional.empty(); Versions lastVersions = job.lastCompleted().get().versions(); if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty(); if (change.revision().isPresent() && ! change.revision().get().equals(lastVersions.targetRevision())) return Optional.empty(); if (job.id().type().environment().isTest() && job.isNodeAllocationFailure()) return Optional.empty(); Instant firstFailing = job.firstFailing().get().end().get(); Instant lastCompleted = job.lastCompleted().get().end().get(); return firstFailing.equals(lastCompleted) ? 
Optional.of(lastCompleted) : Optional.of(lastCompleted.plus(Duration.ofMinutes(10)) .plus(Duration.between(firstFailing, lastCompleted) .dividedBy(2))) .filter(status.now::isBefore); } private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, JobStatus job) { ZoneId zone = ZoneId.from(step.environment(), step.region().get()); Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(job.id().application().instance()) .deployments().get(zone)); return new JobStepStatus(StepType.deployment, step, dependencies, job, status) { @Override public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { Optional<Instant> readyAt = super.readyAt(change, dependent); Optional<Instant> testedAt = status.verifiedAt(job.id(), Versions.from(change, status.application, existingDeployment, status.fallbackPlatform(change, job.id()))); if (readyAt.isEmpty() || testedAt.isEmpty()) return Optional.empty(); return readyAt.get().isAfter(testedAt.get()) ? readyAt : testedAt; } /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */ @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { if ( change.isPinned() && change.platform().isPresent() && ! existingDeployment.map(Deployment::version).equals(change.platform())) return Optional.empty(); if ( change.revision().isPresent() && ! existingDeployment.map(Deployment::revision).equals(change.revision()) && dependent.equals(job())) return Optional.empty(); Change fullChange = status.application().require(job.id().application().instance()).change(); if (existingDeployment.map(deployment -> ! 
(change.upgrades(deployment.version()) || change.upgrades(deployment.revision())) && (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.revision()))) .orElse(false)) return job.lastCompleted().flatMap(Run::end); Optional<Instant> end = Optional.empty(); for (Run run : job.runs().descendingMap().values()) { if (run.versions().targetsMatch(change)) { if (run.hasSucceeded()) end = run.end(); } else if (dependent.equals(job())) break; } return end; } }; } private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies, DeploymentStatus status, JobStatus job) { JobId prodId = new JobId(job.id().application(), JobType.deploymentTo(job.id().type().zone())); return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { Optional<Instant> readyAt = super.readyAt(change, dependent); Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId)); if (readyAt.isEmpty() || deployedAt.isEmpty()) return Optional.empty(); return readyAt.get().isAfter(deployedAt.get()) ? readyAt : deployedAt; } @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId)); return (dependent.equals(job()) ? job.lastTriggered().filter(run -> deployedAt.map(at -> ! 
run.start().isBefore(at)).orElse(false)).stream() : job.runs().values().stream()) .filter(Run::hasSucceeded) .filter(run -> run.versions().targetsMatch(change)) .flatMap(run -> run.end().stream()).findFirst(); } }; } private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, JobStatus job, boolean declared) { return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return RunList.from(job) .matching(run -> dependent.flatMap(status::deploymentFor) .map(deployment -> run.versions().targetsMatch(Versions.from(change, status.application, Optional.of(deployment), status.fallbackPlatform(change, dependent.get())))) .orElseGet(() -> (change.platform().isEmpty() || change.platform().get().equals(run.versions().targetPlatform())) && (change.revision().isEmpty() || change.revision().get().equals(run.versions().targetRevision())))) .matching(Run::hasSucceeded) .matching(run -> dependent.isEmpty() || status.findCloud(dependent.get().type()).equals(status.findCloud(run.id().type()))) .asList().stream() .map(run -> run.end().get()) .max(naturalOrder()); } @Override public boolean isDeclared() { return declared; } }; } } public static class Job { private final JobType type; private final Versions versions; private final Optional<Instant> readyAt; private final Change change; public Job(JobType type, Versions versions, Optional<Instant> readyAt, Change change) { this.type = type; this.versions = type.isSystemTest() ? 
versions.withoutSources() : versions; this.readyAt = readyAt; this.change = change; } public JobType type() { return type; } public Versions versions() { return versions; } public Optional<Instant> readyAt() { return readyAt; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Job job = (Job) o; return type.zone().equals(job.type.zone()) && versions.equals(job.versions) && readyAt.equals(job.readyAt) && change.equals(job.change); } @Override public int hashCode() { return Objects.hash(type.zone(), versions, readyAt, change); } @Override public String toString() { return change + " with versions " + versions + ", ready at " + readyAt; } } }
I guess so. The non-empty case is covered below (albeit not in the context of a CloudTenant) — the normal case is that it is empty. I can add another test for that.
public void cloud_tenant() {
    // Round-trips a CloudTenant through the serializer and verifies that the
    // identifying fields survive. Uses the canonical empty TenantInfo constant
    // (EMPTY, per CONSTANT_CASE naming) rather than the non-conforming EmptyInfo;
    // tenant-info content itself is covered by the dedicated tenant-info tests.
    CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"),
                                         Optional.of(new SimplePrincipal("foobar-user")),
                                         ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"),
                                                           otherPublicKey, new SimplePrincipal("jane")),
                                         TenantInfo.EMPTY);
    CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant));
    assertEquals(tenant.name(), serialized.name());
    assertEquals(tenant.creator(), serialized.creator());
    assertEquals(tenant.developerKeys(), serialized.developerKeys());
}
TenantInfo.EmptyInfo);
public void cloud_tenant() {
    // Serialize a CloudTenant and read it back, then check that the
    // identifying fields are preserved by the round trip.
    CloudTenant original = new CloudTenant(TenantName.from("elderly-lady"),
                                           Optional.of(new SimplePrincipal("foobar-user")),
                                           ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"),
                                                             otherPublicKey, new SimplePrincipal("jane")),
                                           TenantInfo.EMPTY);
    CloudTenant roundTripped = (CloudTenant) serializer.tenantFrom(serializer.toSlime(original));
    assertEquals(original.name(), roundTripped.name());
    assertEquals(original.creator(), roundTripped.creator());
    assertEquals(original.developerKeys(), roundTripped.developerKeys());
}
class TenantSerializerTest { private static final TenantSerializer serializer = new TenantSerializer(); private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" + "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" + "-----END PUBLIC KEY-----\n"); private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" + "pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" + "-----END PUBLIC KEY-----\n"); @Test public void athenz_tenant() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1"))); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.domain(), serialized.domain()); assertEquals(tenant.property(), serialized.property()); assertTrue(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_without_property_id() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.empty()); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertFalse(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_with_contact() { AthenzTenant tenant = new AthenzTenant(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1")), Optional.of(contact())); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.contact(), 
serialized.contact()); } @Test private Contact contact() { return new Contact( URI.create("http: URI.create("http: URI.create("http: List.of( Collections.singletonList("person1"), Collections.singletonList("person2") ), "queue", Optional.empty() ); } }
class TenantSerializerTest { private static final TenantSerializer serializer = new TenantSerializer(); private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" + "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" + "-----END PUBLIC KEY-----\n"); private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" + "pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" + "-----END PUBLIC KEY-----\n"); @Test public void athenz_tenant() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1"))); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.domain(), serialized.domain()); assertEquals(tenant.property(), serialized.property()); assertTrue(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_without_property_id() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.empty()); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertFalse(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_with_contact() { AthenzTenant tenant = new AthenzTenant(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1")), Optional.of(contact())); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.contact(), 
serialized.contact()); } @Test @Test public void cloud_tenant_with_info() { CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"), Optional.of(new SimplePrincipal("foobar-user")), ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"), otherPublicKey, new SimplePrincipal("jane")), TenantInfo.EMPTY.withName("Ofni Tnanet")); CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.info(), serialized.info()); } @Test public void cloud_tenant_with_tenant_info_partial() { TenantInfo partialInfo = TenantInfo.EMPTY .withAddress(TenantInfoAddress.EMPTY.withCity("Hønefoss")); Slime slime = new Slime(); Cursor parentObject = slime.setObject(); serializer.toSlime(partialInfo, parentObject); assertEquals("{\"info\":{\"name\":\"\",\"email\":\"\",\"website\":\"\",\"invoiceEmail\":\"\",\"contactName\":\"\",\"contactEmail\":\"\",\"address\":{\"addressLines\":\"\",\"postalCodeOrZip\":\"\",\"city\":\"Hønefoss\",\"stateRegionProvince\":\"\",\"country\":\"\"}}}", slime.toString()); } @Test public void cloud_tenant_with_tenant_info_full() { TenantInfo fullInfo = TenantInfo.EMPTY .withName("My Company") .withEmail("email@mycomp.any") .withWebsite("http: .withContactEmail("ceo@mycomp.any") .withContactName("My Name") .withInvoiceEmail("invoice@mycomp.any") .withAddress(TenantInfoAddress.EMPTY .withCity("Hønefoss") .withAddressLines("Riperbakken 2") .withCountry("Norway") .withPostalCodeOrZip("3510") .withStateRegionProvince("Viken")) .withBillingContact(TenantInfoBillingContact.EMPTY .withEmail("thomas@sodor.com") .withName("Thomas The Tank Engine") .withPhone("NA") .withAddress(TenantInfoAddress.EMPTY .withCity("Suddery") .withCountry("Sodor") .withAddressLines("Central Station") .withStateRegionProvince("Irish Sea"))); Slime slime = new Slime(); Cursor parentCursor = slime.setObject(); serializer.toSlime(fullInfo, parentCursor); TenantInfo roundTripInfo = serializer.tenantInfoFromSlime(parentCursor.field("info")); 
assertEquals(fullInfo, roundTripInfo); } private Contact contact() { return new Contact( URI.create("http: URI.create("http: URI.create("http: List.of( Collections.singletonList("person1"), Collections.singletonList("person2") ), "queue", Optional.empty() ); } }
Yes, never mind! You should still be able to simplify `isEmpty`, though, by just comparing against the canonical empty instance.
/** Returns whether this carries no information, i.e. equals the canonical empty instance. */
public boolean isEmpty() {
    // Comparing against EMPTY covers every field — including any added later —
    // and avoids allocating a throwaway concatenated string.
    return this.equals(EMPTY);
}
return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty()
/** Returns true if this holds no tenant information, i.e. is equal to the canonical empty instance. */
public boolean isEmpty() {
    // equals is symmetric, so comparing from either side is equivalent.
    return EMPTY.equals(this);
}
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static TenantInfo EmptyInfo = new TenantInfo("","","", "", "", "", TenantInfoAddress.EmptyAddress, TenantInfoBillingContact.EmptyBillingContact); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo 
withContactName(String newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } }
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static final TenantInfo EMPTY = new TenantInfo("","","", "", "", "", TenantInfoAddress.EMPTY, TenantInfoBillingContact.EMPTY); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactName(String 
newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TenantInfo that = (TenantInfo) o; return name.equals(that.name) && email.equals(that.email) && website.equals(that.website) && contactName.equals(that.contactName) && contactEmail.equals(that.contactEmail) && invoiceEmail.equals(that.invoiceEmail) && address.equals(that.address) && billingContact.equals(that.billingContact); } @Override public int hashCode() { return Objects.hash(name, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } }
Right, I didn't notice the other tests.
public void cloud_tenant() {
    // Round-trips a CloudTenant through the serializer and verifies that the
    // identifying fields survive. Uses the canonical empty TenantInfo constant
    // (EMPTY, per CONSTANT_CASE naming) rather than the non-conforming EmptyInfo;
    // tenant-info content itself is covered by the dedicated tenant-info tests.
    CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"),
                                         Optional.of(new SimplePrincipal("foobar-user")),
                                         ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"),
                                                           otherPublicKey, new SimplePrincipal("jane")),
                                         TenantInfo.EMPTY);
    CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant));
    assertEquals(tenant.name(), serialized.name());
    assertEquals(tenant.creator(), serialized.creator());
    assertEquals(tenant.developerKeys(), serialized.developerKeys());
}
TenantInfo.EmptyInfo);
public void cloud_tenant() {
    // Build a CloudTenant, push it through a serialize/deserialize cycle,
    // and assert that the identifying fields come back unchanged.
    CloudTenant before = new CloudTenant(TenantName.from("elderly-lady"),
                                         Optional.of(new SimplePrincipal("foobar-user")),
                                         ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"),
                                                           otherPublicKey, new SimplePrincipal("jane")),
                                         TenantInfo.EMPTY);
    CloudTenant after = (CloudTenant) serializer.tenantFrom(serializer.toSlime(before));
    assertEquals(before.name(), after.name());
    assertEquals(before.creator(), after.creator());
    assertEquals(before.developerKeys(), after.developerKeys());
}
class TenantSerializerTest { private static final TenantSerializer serializer = new TenantSerializer(); private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" + "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" + "-----END PUBLIC KEY-----\n"); private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" + "pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" + "-----END PUBLIC KEY-----\n"); @Test public void athenz_tenant() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1"))); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.domain(), serialized.domain()); assertEquals(tenant.property(), serialized.property()); assertTrue(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_without_property_id() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.empty()); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertFalse(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_with_contact() { AthenzTenant tenant = new AthenzTenant(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1")), Optional.of(contact())); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.contact(), 
serialized.contact()); } @Test private Contact contact() { return new Contact( URI.create("http: URI.create("http: URI.create("http: List.of( Collections.singletonList("person1"), Collections.singletonList("person2") ), "queue", Optional.empty() ); } }
class TenantSerializerTest { private static final TenantSerializer serializer = new TenantSerializer(); private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" + "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" + "-----END PUBLIC KEY-----\n"); private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" + "pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" + "-----END PUBLIC KEY-----\n"); @Test public void athenz_tenant() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1"))); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.domain(), serialized.domain()); assertEquals(tenant.property(), serialized.property()); assertTrue(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_without_property_id() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.empty()); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertFalse(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_with_contact() { AthenzTenant tenant = new AthenzTenant(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1")), Optional.of(contact())); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.contact(), 
serialized.contact()); } @Test @Test public void cloud_tenant_with_info() { CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"), Optional.of(new SimplePrincipal("foobar-user")), ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"), otherPublicKey, new SimplePrincipal("jane")), TenantInfo.EMPTY.withName("Ofni Tnanet")); CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.info(), serialized.info()); } @Test public void cloud_tenant_with_tenant_info_partial() { TenantInfo partialInfo = TenantInfo.EMPTY .withAddress(TenantInfoAddress.EMPTY.withCity("Hønefoss")); Slime slime = new Slime(); Cursor parentObject = slime.setObject(); serializer.toSlime(partialInfo, parentObject); assertEquals("{\"info\":{\"name\":\"\",\"email\":\"\",\"website\":\"\",\"invoiceEmail\":\"\",\"contactName\":\"\",\"contactEmail\":\"\",\"address\":{\"addressLines\":\"\",\"postalCodeOrZip\":\"\",\"city\":\"Hønefoss\",\"stateRegionProvince\":\"\",\"country\":\"\"}}}", slime.toString()); } @Test public void cloud_tenant_with_tenant_info_full() { TenantInfo fullInfo = TenantInfo.EMPTY .withName("My Company") .withEmail("email@mycomp.any") .withWebsite("http: .withContactEmail("ceo@mycomp.any") .withContactName("My Name") .withInvoiceEmail("invoice@mycomp.any") .withAddress(TenantInfoAddress.EMPTY .withCity("Hønefoss") .withAddressLines("Riperbakken 2") .withCountry("Norway") .withPostalCodeOrZip("3510") .withStateRegionProvince("Viken")) .withBillingContact(TenantInfoBillingContact.EMPTY .withEmail("thomas@sodor.com") .withName("Thomas The Tank Engine") .withPhone("NA") .withAddress(TenantInfoAddress.EMPTY .withCity("Suddery") .withCountry("Sodor") .withAddressLines("Central Station") .withStateRegionProvince("Irish Sea"))); Slime slime = new Slime(); Cursor parentCursor = slime.setObject(); serializer.toSlime(fullInfo, parentCursor); TenantInfo roundTripInfo = serializer.tenantInfoFromSlime(parentCursor.field("info")); 
assertEquals(fullInfo, roundTripInfo); } private Contact contact() { return new Contact( URI.create("http: URI.create("http: URI.create("http: List.of( Collections.singletonList("person1"), Collections.singletonList("person2") ), "queue", Optional.empty() ); } }
fixed
public boolean isEmpty() { return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty() && address.isEmpty() && billingContact.isEmpty(); }
return (name + email + website + contactEmail + contactName + invoiceEmail).isEmpty()
public boolean isEmpty() { return this.equals(EMPTY); }
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static TenantInfo EmptyInfo = new TenantInfo("","","", "", "", "", TenantInfoAddress.EmptyAddress, TenantInfoBillingContact.EmptyBillingContact); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo 
withContactName(String newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } }
class TenantInfo { private final String name; private final String email; private final String website; private final String contactName; private final String contactEmail; private final String invoiceEmail; private final TenantInfoAddress address; private final TenantInfoBillingContact billingContact; TenantInfo(String name, String email, String website, String contactName, String contactEmail, String invoiceEmail, TenantInfoAddress address, TenantInfoBillingContact billingContact) { this.name = Objects.requireNonNull(name); this.email = Objects.requireNonNull(email); this.website = Objects.requireNonNull(website); this.contactName = Objects.requireNonNull(contactName); this.contactEmail = Objects.requireNonNull(contactEmail); this.invoiceEmail = Objects.requireNonNull(invoiceEmail); this.address = Objects.requireNonNull(address); this.billingContact = Objects.requireNonNull(billingContact); } public static final TenantInfo EMPTY = new TenantInfo("","","", "", "", "", TenantInfoAddress.EMPTY, TenantInfoBillingContact.EMPTY); public String name() { return name; } public String email() { return email; } public String website() { return website; } public String contactName() { return contactName; } public String contactEmail() { return contactEmail; } public String invoiceEmail() { return invoiceEmail; } public TenantInfoAddress address() { return address; } public TenantInfoBillingContact billingContact() { return billingContact; } public TenantInfo withName(String newName) { return new TenantInfo(newName, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withEmail(String newEmail) { return new TenantInfo(name, newEmail, website, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withWebsite(String newWebsite) { return new TenantInfo(name, email, newWebsite, contactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactName(String 
newContactName) { return new TenantInfo(name, email, website, newContactName, contactEmail, invoiceEmail, address, billingContact); } public TenantInfo withContactEmail(String newContactEmail) { return new TenantInfo(name, email, website, contactName, newContactEmail, invoiceEmail, address, billingContact); } public TenantInfo withInvoiceEmail(String newInvoiceEmail) { return new TenantInfo(name, email, website, contactName, contactEmail, newInvoiceEmail, address, billingContact); } public TenantInfo withAddress(TenantInfoAddress newAddress) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, newAddress, billingContact); } public TenantInfo withBillingContact(TenantInfoBillingContact newBillingContact) { return new TenantInfo(name, email, website, contactName, contactEmail, invoiceEmail, address, newBillingContact); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TenantInfo that = (TenantInfo) o; return name.equals(that.name) && email.equals(that.email) && website.equals(that.website) && contactName.equals(that.contactName) && contactEmail.equals(that.contactEmail) && invoiceEmail.equals(that.invoiceEmail) && address.equals(that.address) && billingContact.equals(that.billingContact); } @Override public int hashCode() { return Objects.hash(name, email, website, contactName, contactEmail, invoiceEmail, address, billingContact); } }
Fixed. Added one extra test there to cover that the info field is set correctly in the tenant object.
public void cloud_tenant() { CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"), Optional.of(new SimplePrincipal("foobar-user")), ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"), otherPublicKey, new SimplePrincipal("jane")), TenantInfo.EmptyInfo); CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.creator(), serialized.creator()); assertEquals(tenant.developerKeys(), serialized.developerKeys()); }
TenantInfo.EmptyInfo);
public void cloud_tenant() { CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"), Optional.of(new SimplePrincipal("foobar-user")), ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"), otherPublicKey, new SimplePrincipal("jane")), TenantInfo.EMPTY); CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.creator(), serialized.creator()); assertEquals(tenant.developerKeys(), serialized.developerKeys()); }
class TenantSerializerTest { private static final TenantSerializer serializer = new TenantSerializer(); private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" + "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" + "-----END PUBLIC KEY-----\n"); private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" + "pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" + "-----END PUBLIC KEY-----\n"); @Test public void athenz_tenant() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1"))); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.domain(), serialized.domain()); assertEquals(tenant.property(), serialized.property()); assertTrue(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_without_property_id() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.empty()); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertFalse(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_with_contact() { AthenzTenant tenant = new AthenzTenant(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1")), Optional.of(contact())); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.contact(), 
serialized.contact()); } @Test private Contact contact() { return new Contact( URI.create("http: URI.create("http: URI.create("http: List.of( Collections.singletonList("person1"), Collections.singletonList("person2") ), "queue", Optional.empty() ); } }
class TenantSerializerTest { private static final TenantSerializer serializer = new TenantSerializer(); private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" + "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" + "-----END PUBLIC KEY-----\n"); private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" + "pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" + "-----END PUBLIC KEY-----\n"); @Test public void athenz_tenant() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1"))); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.name(), serialized.name()); assertEquals(tenant.domain(), serialized.domain()); assertEquals(tenant.property(), serialized.property()); assertTrue(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_without_property_id() { AthenzTenant tenant = AthenzTenant.create(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.empty()); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertFalse(serialized.propertyId().isPresent()); assertEquals(tenant.propertyId(), serialized.propertyId()); } @Test public void athenz_tenant_with_contact() { AthenzTenant tenant = new AthenzTenant(TenantName.from("athenz-tenant"), new AthenzDomain("domain1"), new Property("property1"), Optional.of(new PropertyId("1")), Optional.of(contact())); AthenzTenant serialized = (AthenzTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.contact(), 
serialized.contact()); } @Test @Test public void cloud_tenant_with_info() { CloudTenant tenant = new CloudTenant(TenantName.from("elderly-lady"), Optional.of(new SimplePrincipal("foobar-user")), ImmutableBiMap.of(publicKey, new SimplePrincipal("joe"), otherPublicKey, new SimplePrincipal("jane")), TenantInfo.EMPTY.withName("Ofni Tnanet")); CloudTenant serialized = (CloudTenant) serializer.tenantFrom(serializer.toSlime(tenant)); assertEquals(tenant.info(), serialized.info()); } @Test public void cloud_tenant_with_tenant_info_partial() { TenantInfo partialInfo = TenantInfo.EMPTY .withAddress(TenantInfoAddress.EMPTY.withCity("Hønefoss")); Slime slime = new Slime(); Cursor parentObject = slime.setObject(); serializer.toSlime(partialInfo, parentObject); assertEquals("{\"info\":{\"name\":\"\",\"email\":\"\",\"website\":\"\",\"invoiceEmail\":\"\",\"contactName\":\"\",\"contactEmail\":\"\",\"address\":{\"addressLines\":\"\",\"postalCodeOrZip\":\"\",\"city\":\"Hønefoss\",\"stateRegionProvince\":\"\",\"country\":\"\"}}}", slime.toString()); } @Test public void cloud_tenant_with_tenant_info_full() { TenantInfo fullInfo = TenantInfo.EMPTY .withName("My Company") .withEmail("email@mycomp.any") .withWebsite("http: .withContactEmail("ceo@mycomp.any") .withContactName("My Name") .withInvoiceEmail("invoice@mycomp.any") .withAddress(TenantInfoAddress.EMPTY .withCity("Hønefoss") .withAddressLines("Riperbakken 2") .withCountry("Norway") .withPostalCodeOrZip("3510") .withStateRegionProvince("Viken")) .withBillingContact(TenantInfoBillingContact.EMPTY .withEmail("thomas@sodor.com") .withName("Thomas The Tank Engine") .withPhone("NA") .withAddress(TenantInfoAddress.EMPTY .withCity("Suddery") .withCountry("Sodor") .withAddressLines("Central Station") .withStateRegionProvince("Irish Sea"))); Slime slime = new Slime(); Cursor parentCursor = slime.setObject(); serializer.toSlime(fullInfo, parentCursor); TenantInfo roundTripInfo = serializer.tenantInfoFromSlime(parentCursor.field("info")); 
assertEquals(fullInfo, roundTripInfo); } private Contact contact() { return new Contact( URI.create("http: URI.create("http: URI.create("http: List.of( Collections.singletonList("person1"), Collections.singletonList("person2") ), "queue", Optional.empty() ); } }
Consider adding some additional validation of tenant id
private static void verifyValues(JsonNode root) { var cursor = new JsonAccessor(root); cursor.get("rules").forEachArrayElement(rule -> rule.get("conditions").forEachArrayElement(condition -> { var dimension = condition.get("dimension"); if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.APPLICATION_ID))) { condition.get("values").forEachArrayElement(conditionValue -> { String applicationIdString = conditionValue.asString() .orElseThrow(() -> new IllegalArgumentException("Non-string application ID: " + conditionValue)); ApplicationId.fromSerializedForm(applicationIdString); }); } else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.NODE_TYPE))) { condition.get("values").forEachArrayElement(conditionValue -> { String nodeTypeString = conditionValue.asString() .orElseThrow(() -> new IllegalArgumentException("Non-string node type: " + conditionValue)); NodeType.valueOf(nodeTypeString); }); } else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.CONSOLE_USER_EMAIL))) { condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString() .orElseThrow(() -> new IllegalArgumentException("Non-string email address: " + conditionValue))); } else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.TENANT_ID))) { condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString() .orElseThrow(() -> new IllegalArgumentException("Non-string tenant ID: " + conditionValue))); } })); }
condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString()
private static void verifyValues(JsonNode root) { var cursor = new JsonAccessor(root); cursor.get("rules").forEachArrayElement(rule -> rule.get("conditions").forEachArrayElement(condition -> { var dimension = condition.get("dimension"); if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.APPLICATION_ID))) { condition.get("values").forEachArrayElement(conditionValue -> { String applicationIdString = conditionValue.asString() .orElseThrow(() -> new IllegalArgumentException("Non-string application ID: " + conditionValue)); ApplicationId.fromSerializedForm(applicationIdString); }); } else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.NODE_TYPE))) { condition.get("values").forEachArrayElement(conditionValue -> { String nodeTypeString = conditionValue.asString() .orElseThrow(() -> new IllegalArgumentException("Non-string node type: " + conditionValue)); NodeType.valueOf(nodeTypeString); }); } else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.CONSOLE_USER_EMAIL))) { condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString() .orElseThrow(() -> new IllegalArgumentException("Non-string email address: " + conditionValue))); } else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.TENANT_ID))) { condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString() .orElseThrow(() -> new IllegalArgumentException("Non-string tenant ID: " + conditionValue))); } })); }
class SystemFlagsDataArchive { private static final ObjectMapper mapper = new ObjectMapper(); private final Map<FlagId, Map<String, FlagData>> files; private SystemFlagsDataArchive(Map<FlagId, Map<String, FlagData>> files) { this.files = files; } public static SystemFlagsDataArchive fromZip(InputStream rawIn) { Builder builder = new Builder(); try (ZipInputStream zipIn = new ZipInputStream(new BufferedInputStream(rawIn))) { ZipEntry entry; while ((entry = zipIn.getNextEntry()) != null) { String name = entry.getName(); if (!entry.isDirectory() && name.startsWith("flags/")) { Path filePath = Paths.get(name); String rawData = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8); addFile(builder, rawData, filePath); } } return builder.build(); } catch (IOException e) { throw new UncheckedIOException(e); } } public static SystemFlagsDataArchive fromDirectory(Path directory) { Path root = directory.toAbsolutePath(); Path flagsDirectory = directory.resolve("flags"); if (!Files.isDirectory(flagsDirectory)) { throw new IllegalArgumentException("Sub-directory 'flags' does not exist: " + flagsDirectory); } try (Stream<Path> directoryStream = Files.walk(root)) { Builder builder = new Builder(); directoryStream.forEach(absolutePath -> { Path relativePath = root.relativize(absolutePath); if (!Files.isDirectory(absolutePath) && relativePath.startsWith("flags")) { String rawData = uncheck(() -> Files.readString(absolutePath, StandardCharsets.UTF_8)); addFile(builder, rawData, relativePath); } }); return builder.build(); } catch (IOException e) { throw new UncheckedIOException(e); } } public void toZip(OutputStream out) { ZipOutputStream zipOut = new ZipOutputStream(out); files.forEach((flagId, fileMap) -> { fileMap.forEach((filename, flagData) -> { uncheck(() -> { zipOut.putNextEntry(new ZipEntry(toFilePath(flagId, filename))); zipOut.write(flagData.serializeToUtf8Json()); zipOut.closeEntry(); }); }); }); uncheck(zipOut::flush); } public Set<FlagData> flagData(FlagsTarget 
target) { List<String> filenames = target.flagDataFilesPrioritized(); Set<FlagData> targetData = new HashSet<>(); files.forEach((flagId, fileMap) -> { for (String filename : filenames) { FlagData data = fileMap.get(filename); if (data != null) { if (!data.isEmpty()) { targetData.add(data); } return; } } }); return targetData; } public void validateAllFilesAreForTargets(SystemName currentSystem, Set<FlagsTarget> targets) throws IllegalArgumentException { Set<String> validFiles = targets.stream() .flatMap(target -> target.flagDataFilesPrioritized().stream()) .collect(Collectors.toSet()); Set<SystemName> otherSystems = Arrays.stream(SystemName.values()) .filter(systemName -> systemName != currentSystem) .collect(Collectors.toSet()); files.forEach((flagId, fileMap) -> { for (String filename : fileMap.keySet()) { boolean isFileForOtherSystem = otherSystems.stream() .anyMatch(system -> filename.startsWith(system.value() + ".")); boolean isFileForCurrentSystem = validFiles.contains(filename); if (!isFileForOtherSystem && !isFileForCurrentSystem) { throw new IllegalArgumentException("Unknown flag file: " + toFilePath(flagId, filename)); } } }); } private static void addFile(Builder builder, String rawData, Path filePath) { String filename = filePath.getFileName().toString(); if (filename.startsWith(".")) { return; } if (!filename.endsWith(".json")) { throw new IllegalArgumentException(String.format("Only JSON files are allowed in 'flags/' directory (found '%s')", filePath.toString())); } FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString()); FlagData flagData; if (rawData.isBlank()) { flagData = new FlagData(directoryDeducedFlagId); } else { String normalizedRawData = normalizeJson(rawData); flagData = FlagData.deserialize(normalizedRawData); if (!directoryDeducedFlagId.equals(flagData.id())) { throw new IllegalArgumentException( String.format("Flag data file with flag id '%s' in directory for '%s'", flagData.id(), 
directoryDeducedFlagId.toString())); } String serializedData = flagData.serializeToJson(); if (!JSON.equals(serializedData, normalizedRawData)) { throw new IllegalArgumentException(filePath + " contains unknown non-comment fields: " + "after removing any comment fields the JSON is:\n " + normalizedRawData + "\nbut deserializing this ended up with a JSON that are missing some of the fields:\n " + serializedData + "\nSee https: } } if (builder.hasFile(filename, flagData)) { throw new IllegalArgumentException( String.format("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!", filePath, flagData.id())); } builder.addFile(filename, flagData); } static String normalizeJson(String json) { JsonNode root = uncheck(() -> mapper.readTree(json)); removeCommentsRecursively(root); verifyValues(root); return root.toString(); } private static void removeCommentsRecursively(JsonNode node) { if (node instanceof ObjectNode) { ObjectNode objectNode = (ObjectNode) node; objectNode.remove("comment"); } node.forEach(SystemFlagsDataArchive::removeCommentsRecursively); } private static String toFilePath(FlagId flagId, String filename) { return "flags/" + flagId.toString() + "/" + filename; } public static class Builder { private final Map<FlagId, Map<String, FlagData>> files = new TreeMap<>(); public Builder() {} public Builder addFile(String filename, FlagData data) { files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data); return this; } public boolean hasFile(String filename, FlagData data) { return files.containsKey(data.id()) && files.get(data.id()).containsKey(filename); } public SystemFlagsDataArchive build() { Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>(); files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map))); return new SystemFlagsDataArchive(copy); } } private static class JsonAccessor { private final JsonNode jsonNode; public JsonAccessor(JsonNode jsonNode) { this.jsonNode = jsonNode; } 
public JsonAccessor get(String fieldName) { if (jsonNode == null) { return this; } else { return new JsonAccessor(jsonNode.get(fieldName)); } } public Optional<String> asString() { return jsonNode != null && jsonNode.isTextual() ? Optional.of(jsonNode.textValue()) : Optional.empty(); } public void forEachArrayElement(Consumer<JsonAccessor> consumer) { if (jsonNode != null && jsonNode.isArray()) { jsonNode.forEach(jsonNodeElement -> consumer.accept(new JsonAccessor(jsonNodeElement))); } } /** Returns true if this (JsonNode) is a string and equal to value. */ public boolean isEqualTo(String value) { return jsonNode != null && jsonNode.isTextual() && Objects.equals(jsonNode.textValue(), value); } @Override public String toString() { return jsonNode == null ? "undefined" : jsonNode.toString(); } } }
class SystemFlagsDataArchive { private static final ObjectMapper mapper = new ObjectMapper(); private final Map<FlagId, Map<String, FlagData>> files; private SystemFlagsDataArchive(Map<FlagId, Map<String, FlagData>> files) { this.files = files; } public static SystemFlagsDataArchive fromZip(InputStream rawIn) { Builder builder = new Builder(); try (ZipInputStream zipIn = new ZipInputStream(new BufferedInputStream(rawIn))) { ZipEntry entry; while ((entry = zipIn.getNextEntry()) != null) { String name = entry.getName(); if (!entry.isDirectory() && name.startsWith("flags/")) { Path filePath = Paths.get(name); String rawData = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8); addFile(builder, rawData, filePath); } } return builder.build(); } catch (IOException e) { throw new UncheckedIOException(e); } } public static SystemFlagsDataArchive fromDirectory(Path directory) { Path root = directory.toAbsolutePath(); Path flagsDirectory = directory.resolve("flags"); if (!Files.isDirectory(flagsDirectory)) { throw new IllegalArgumentException("Sub-directory 'flags' does not exist: " + flagsDirectory); } try (Stream<Path> directoryStream = Files.walk(root)) { Builder builder = new Builder(); directoryStream.forEach(absolutePath -> { Path relativePath = root.relativize(absolutePath); if (!Files.isDirectory(absolutePath) && relativePath.startsWith("flags")) { String rawData = uncheck(() -> Files.readString(absolutePath, StandardCharsets.UTF_8)); addFile(builder, rawData, relativePath); } }); return builder.build(); } catch (IOException e) { throw new UncheckedIOException(e); } } public void toZip(OutputStream out) { ZipOutputStream zipOut = new ZipOutputStream(out); files.forEach((flagId, fileMap) -> { fileMap.forEach((filename, flagData) -> { uncheck(() -> { zipOut.putNextEntry(new ZipEntry(toFilePath(flagId, filename))); zipOut.write(flagData.serializeToUtf8Json()); zipOut.closeEntry(); }); }); }); uncheck(zipOut::flush); } public Set<FlagData> flagData(FlagsTarget 
target) { List<String> filenames = target.flagDataFilesPrioritized(); Set<FlagData> targetData = new HashSet<>(); files.forEach((flagId, fileMap) -> { for (String filename : filenames) { FlagData data = fileMap.get(filename); if (data != null) { if (!data.isEmpty()) { targetData.add(data); } return; } } }); return targetData; } public void validateAllFilesAreForTargets(SystemName currentSystem, Set<FlagsTarget> targets) throws IllegalArgumentException { Set<String> validFiles = targets.stream() .flatMap(target -> target.flagDataFilesPrioritized().stream()) .collect(Collectors.toSet()); Set<SystemName> otherSystems = Arrays.stream(SystemName.values()) .filter(systemName -> systemName != currentSystem) .collect(Collectors.toSet()); files.forEach((flagId, fileMap) -> { for (String filename : fileMap.keySet()) { boolean isFileForOtherSystem = otherSystems.stream() .anyMatch(system -> filename.startsWith(system.value() + ".")); boolean isFileForCurrentSystem = validFiles.contains(filename); if (!isFileForOtherSystem && !isFileForCurrentSystem) { throw new IllegalArgumentException("Unknown flag file: " + toFilePath(flagId, filename)); } } }); } private static void addFile(Builder builder, String rawData, Path filePath) { String filename = filePath.getFileName().toString(); if (filename.startsWith(".")) { return; } if (!filename.endsWith(".json")) { throw new IllegalArgumentException(String.format("Only JSON files are allowed in 'flags/' directory (found '%s')", filePath.toString())); } FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString()); FlagData flagData; if (rawData.isBlank()) { flagData = new FlagData(directoryDeducedFlagId); } else { String normalizedRawData = normalizeJson(rawData); flagData = FlagData.deserialize(normalizedRawData); if (!directoryDeducedFlagId.equals(flagData.id())) { throw new IllegalArgumentException( String.format("Flag data file with flag id '%s' in directory for '%s'", flagData.id(), 
directoryDeducedFlagId.toString())); } String serializedData = flagData.serializeToJson(); if (!JSON.equals(serializedData, normalizedRawData)) { throw new IllegalArgumentException(filePath + " contains unknown non-comment fields: " + "after removing any comment fields the JSON is:\n " + normalizedRawData + "\nbut deserializing this ended up with a JSON that are missing some of the fields:\n " + serializedData + "\nSee https: } } if (builder.hasFile(filename, flagData)) { throw new IllegalArgumentException( String.format("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!", filePath, flagData.id())); } builder.addFile(filename, flagData); } static String normalizeJson(String json) { JsonNode root = uncheck(() -> mapper.readTree(json)); removeCommentsRecursively(root); verifyValues(root); return root.toString(); } private static void removeCommentsRecursively(JsonNode node) { if (node instanceof ObjectNode) { ObjectNode objectNode = (ObjectNode) node; objectNode.remove("comment"); } node.forEach(SystemFlagsDataArchive::removeCommentsRecursively); } private static String toFilePath(FlagId flagId, String filename) { return "flags/" + flagId.toString() + "/" + filename; } public static class Builder { private final Map<FlagId, Map<String, FlagData>> files = new TreeMap<>(); public Builder() {} public Builder addFile(String filename, FlagData data) { files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data); return this; } public boolean hasFile(String filename, FlagData data) { return files.containsKey(data.id()) && files.get(data.id()).containsKey(filename); } public SystemFlagsDataArchive build() { Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>(); files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map))); return new SystemFlagsDataArchive(copy); } } private static class JsonAccessor { private final JsonNode jsonNode; public JsonAccessor(JsonNode jsonNode) { this.jsonNode = jsonNode; } 
public JsonAccessor get(String fieldName) { if (jsonNode == null) { return this; } else { return new JsonAccessor(jsonNode.get(fieldName)); } } public Optional<String> asString() { return jsonNode != null && jsonNode.isTextual() ? Optional.of(jsonNode.textValue()) : Optional.empty(); } public void forEachArrayElement(Consumer<JsonAccessor> consumer) { if (jsonNode != null && jsonNode.isArray()) { jsonNode.forEach(jsonNodeElement -> consumer.accept(new JsonAccessor(jsonNodeElement))); } } /** Returns true if this (JsonNode) is a string and equal to value. */ public boolean isEqualTo(String value) { return jsonNode != null && jsonNode.isTextual() && Objects.equals(jsonNode.textValue(), value); } @Override public String toString() { return jsonNode == null ? "undefined" : jsonNode.toString(); } } }
Not sure this first part is needed?
public List<NewDocumentType> getDocumentTypesWithIndexedCluster() { List<NewDocumentType> indexedDocTypes = new ArrayList<>(); for (NewDocumentType type : documentDefinitions.values()) { if (findStreamingCluster(type.getFullName().getName()).isEmpty() && hasIndexedCluster() && getIndexed().hasDocumentDB(type.getFullName().getName())) { indexedDocTypes.add(type); } } return indexedDocTypes; }
if (findStreamingCluster(type.getFullName().getName()).isEmpty()
public List<NewDocumentType> getDocumentTypesWithIndexedCluster() { List<NewDocumentType> indexedDocTypes = new ArrayList<>(); for (NewDocumentType type : documentDefinitions.values()) { if (findStreamingCluster(type.getFullName().getName()).isEmpty() && hasIndexedCluster() && getIndexed().hasDocumentDB(type.getFullName().getName())) { indexedDocTypes.add(type); } } return indexedDocTypes; }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> { private final Map<String, NewDocumentType> documentDefinitions; private final Set<NewDocumentType> globallyDistributedDocuments; private final boolean combined; public Builder(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments, boolean combined) { this.documentDefinitions = documentDefinitions; this.globallyDistributedDocuments = globallyDistributedDocuments; this.combined = combined; } @Override protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { ModelElement clusterElem = new ModelElement(producerSpec); String clusterName = ContentCluster.getClusterId(clusterElem); Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown"); ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, deployState.getProperties(), documentDefinitions, globallyDistributedDocuments, getFlushOnShutdown(flushOnShutdownElem, deployState), combined); ModelElement tuning = clusterElem.childByPath("engine.proton.tuning"); if (tuning != null) { search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml())); } ModelElement protonElem = clusterElem.childByPath("engine.proton"); if (protonElem != null) { search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem)); } buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search); buildIndexedSearchCluster(deployState, clusterElem, clusterName, search); return search; } private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) { if (flushOnShutdownElem != null) { return flushOnShutdownElem; } return ! 
stateIsHosted(deployState); } private Double getQueryTimeout(ModelElement clusterElem) { return clusterElem.childAsDouble("engine.proton.query-timeout"); } private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("streaming".equals(mode)) { buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType); } } } private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) { String docTypeName = docType.stringAttribute("type"); StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName); search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType)); } private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { List<ModelElement> indexedDefs = getIndexedSchemas(clusterElem); if (!indexedDefs.isEmpty()) { IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState); isc.setRoutingSelector(clusterElem.childAsString("documents.selection")); Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay"); if (visibilityDelay != null) { search.setVisibilityDelay(visibilityDelay); } search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs); } } private List<ModelElement> getIndexedSchemas(ModelElement clusterElem) { List<ModelElement> indexedDefs = new ArrayList<>(); ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return indexedDefs; } for (ModelElement docType : 
docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("index".equals(mode)) { indexedDefs.add(docType); } } return indexedDefs; } }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> { private final Map<String, NewDocumentType> documentDefinitions; private final Set<NewDocumentType> globallyDistributedDocuments; private final boolean combined; public Builder(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments, boolean combined) { this.documentDefinitions = documentDefinitions; this.globallyDistributedDocuments = globallyDistributedDocuments; this.combined = combined; } @Override protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { ModelElement clusterElem = new ModelElement(producerSpec); String clusterName = ContentCluster.getClusterId(clusterElem); Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown"); ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, deployState.getProperties(), documentDefinitions, globallyDistributedDocuments, getFlushOnShutdown(flushOnShutdownElem, deployState), combined); ModelElement tuning = clusterElem.childByPath("engine.proton.tuning"); if (tuning != null) { search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml())); } ModelElement protonElem = clusterElem.childByPath("engine.proton"); if (protonElem != null) { search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem)); } buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search); buildIndexedSearchCluster(deployState, clusterElem, clusterName, search); return search; } private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) { if (flushOnShutdownElem != null) { return flushOnShutdownElem; } return ! 
stateIsHosted(deployState); } private Double getQueryTimeout(ModelElement clusterElem) { return clusterElem.childAsDouble("engine.proton.query-timeout"); } private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("streaming".equals(mode)) { buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType); } } } private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) { String docTypeName = docType.stringAttribute("type"); StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName); search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType)); } private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { List<ModelElement> indexedDefs = getIndexedSchemas(clusterElem); if (!indexedDefs.isEmpty()) { IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState); isc.setRoutingSelector(clusterElem.childAsString("documents.selection")); Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay"); if (visibilityDelay != null) { search.setVisibilityDelay(visibilityDelay); } search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs); } } private List<ModelElement> getIndexedSchemas(ModelElement clusterElem) { List<ModelElement> indexedDefs = new ArrayList<>(); ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return indexedDefs; } for (ModelElement docType : 
docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("index".equals(mode)) { indexedDefs.add(docType); } } return indexedDefs; } }
It mimics the semantics of `getConfig`
public List<NewDocumentType> getDocumentTypesWithIndexedCluster() { List<NewDocumentType> indexedDocTypes = new ArrayList<>(); for (NewDocumentType type : documentDefinitions.values()) { if (findStreamingCluster(type.getFullName().getName()).isEmpty() && hasIndexedCluster() && getIndexed().hasDocumentDB(type.getFullName().getName())) { indexedDocTypes.add(type); } } return indexedDocTypes; }
if (findStreamingCluster(type.getFullName().getName()).isEmpty()
public List<NewDocumentType> getDocumentTypesWithIndexedCluster() { List<NewDocumentType> indexedDocTypes = new ArrayList<>(); for (NewDocumentType type : documentDefinitions.values()) { if (findStreamingCluster(type.getFullName().getName()).isEmpty() && hasIndexedCluster() && getIndexed().hasDocumentDB(type.getFullName().getName())) { indexedDocTypes.add(type); } } return indexedDocTypes; }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> { private final Map<String, NewDocumentType> documentDefinitions; private final Set<NewDocumentType> globallyDistributedDocuments; private final boolean combined; public Builder(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments, boolean combined) { this.documentDefinitions = documentDefinitions; this.globallyDistributedDocuments = globallyDistributedDocuments; this.combined = combined; } @Override protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { ModelElement clusterElem = new ModelElement(producerSpec); String clusterName = ContentCluster.getClusterId(clusterElem); Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown"); ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, deployState.getProperties(), documentDefinitions, globallyDistributedDocuments, getFlushOnShutdown(flushOnShutdownElem, deployState), combined); ModelElement tuning = clusterElem.childByPath("engine.proton.tuning"); if (tuning != null) { search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml())); } ModelElement protonElem = clusterElem.childByPath("engine.proton"); if (protonElem != null) { search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem)); } buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search); buildIndexedSearchCluster(deployState, clusterElem, clusterName, search); return search; } private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) { if (flushOnShutdownElem != null) { return flushOnShutdownElem; } return ! 
stateIsHosted(deployState); } private Double getQueryTimeout(ModelElement clusterElem) { return clusterElem.childAsDouble("engine.proton.query-timeout"); } private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("streaming".equals(mode)) { buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType); } } } private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) { String docTypeName = docType.stringAttribute("type"); StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName); search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType)); } private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { List<ModelElement> indexedDefs = getIndexedSchemas(clusterElem); if (!indexedDefs.isEmpty()) { IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState); isc.setRoutingSelector(clusterElem.childAsString("documents.selection")); Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay"); if (visibilityDelay != null) { search.setVisibilityDelay(visibilityDelay); } search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs); } } private List<ModelElement> getIndexedSchemas(ModelElement clusterElem) { List<ModelElement> indexedDefs = new ArrayList<>(); ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return indexedDefs; } for (ModelElement docType : 
docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("index".equals(mode)) { indexedDefs.add(docType); } } return indexedDefs; } }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> { private final Map<String, NewDocumentType> documentDefinitions; private final Set<NewDocumentType> globallyDistributedDocuments; private final boolean combined; public Builder(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments, boolean combined) { this.documentDefinitions = documentDefinitions; this.globallyDistributedDocuments = globallyDistributedDocuments; this.combined = combined; } @Override protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { ModelElement clusterElem = new ModelElement(producerSpec); String clusterName = ContentCluster.getClusterId(clusterElem); Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown"); ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, deployState.getProperties(), documentDefinitions, globallyDistributedDocuments, getFlushOnShutdown(flushOnShutdownElem, deployState), combined); ModelElement tuning = clusterElem.childByPath("engine.proton.tuning"); if (tuning != null) { search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml())); } ModelElement protonElem = clusterElem.childByPath("engine.proton"); if (protonElem != null) { search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem)); } buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search); buildIndexedSearchCluster(deployState, clusterElem, clusterName, search); return search; } private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) { if (flushOnShutdownElem != null) { return flushOnShutdownElem; } return ! 
stateIsHosted(deployState); } private Double getQueryTimeout(ModelElement clusterElem) { return clusterElem.childAsDouble("engine.proton.query-timeout"); } private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("streaming".equals(mode)) { buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType); } } } private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) { String docTypeName = docType.stringAttribute("type"); StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName); search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType)); } private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { List<ModelElement> indexedDefs = getIndexedSchemas(clusterElem); if (!indexedDefs.isEmpty()) { IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState); isc.setRoutingSelector(clusterElem.childAsString("documents.selection")); Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay"); if (visibilityDelay != null) { search.setVisibilityDelay(visibilityDelay); } search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs); } } private List<ModelElement> getIndexedSchemas(ModelElement clusterElem) { List<ModelElement> indexedDefs = new ArrayList<>(); ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return indexedDefs; } for (ModelElement docType : 
docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("index".equals(mode)) { indexedDefs.add(docType); } } return indexedDefs; } }
Yeah, if we need to separate streaming and store-only, we need it.
public List<NewDocumentType> getDocumentTypesWithIndexedCluster() { List<NewDocumentType> indexedDocTypes = new ArrayList<>(); for (NewDocumentType type : documentDefinitions.values()) { if (findStreamingCluster(type.getFullName().getName()).isEmpty() && hasIndexedCluster() && getIndexed().hasDocumentDB(type.getFullName().getName())) { indexedDocTypes.add(type); } } return indexedDocTypes; }
if (findStreamingCluster(type.getFullName().getName()).isEmpty()
public List<NewDocumentType> getDocumentTypesWithIndexedCluster() { List<NewDocumentType> indexedDocTypes = new ArrayList<>(); for (NewDocumentType type : documentDefinitions.values()) { if (findStreamingCluster(type.getFullName().getName()).isEmpty() && hasIndexedCluster() && getIndexed().hasDocumentDB(type.getFullName().getName())) { indexedDocTypes.add(type); } } return indexedDocTypes; }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> { private final Map<String, NewDocumentType> documentDefinitions; private final Set<NewDocumentType> globallyDistributedDocuments; private final boolean combined; public Builder(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments, boolean combined) { this.documentDefinitions = documentDefinitions; this.globallyDistributedDocuments = globallyDistributedDocuments; this.combined = combined; } @Override protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { ModelElement clusterElem = new ModelElement(producerSpec); String clusterName = ContentCluster.getClusterId(clusterElem); Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown"); ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, deployState.getProperties(), documentDefinitions, globallyDistributedDocuments, getFlushOnShutdown(flushOnShutdownElem, deployState), combined); ModelElement tuning = clusterElem.childByPath("engine.proton.tuning"); if (tuning != null) { search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml())); } ModelElement protonElem = clusterElem.childByPath("engine.proton"); if (protonElem != null) { search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem)); } buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search); buildIndexedSearchCluster(deployState, clusterElem, clusterName, search); return search; } private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) { if (flushOnShutdownElem != null) { return flushOnShutdownElem; } return ! 
stateIsHosted(deployState); } private Double getQueryTimeout(ModelElement clusterElem) { return clusterElem.childAsDouble("engine.proton.query-timeout"); } private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("streaming".equals(mode)) { buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType); } } } private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) { String docTypeName = docType.stringAttribute("type"); StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName); search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType)); } private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { List<ModelElement> indexedDefs = getIndexedSchemas(clusterElem); if (!indexedDefs.isEmpty()) { IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState); isc.setRoutingSelector(clusterElem.childAsString("documents.selection")); Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay"); if (visibilityDelay != null) { search.setVisibilityDelay(visibilityDelay); } search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs); } } private List<ModelElement> getIndexedSchemas(ModelElement clusterElem) { List<ModelElement> indexedDefs = new ArrayList<>(); ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return indexedDefs; } for (ModelElement docType : 
docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("index".equals(mode)) { indexedDefs.add(docType); } } return indexedDefs; } }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> { private final Map<String, NewDocumentType> documentDefinitions; private final Set<NewDocumentType> globallyDistributedDocuments; private final boolean combined; public Builder(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments, boolean combined) { this.documentDefinitions = documentDefinitions; this.globallyDistributedDocuments = globallyDistributedDocuments; this.combined = combined; } @Override protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { ModelElement clusterElem = new ModelElement(producerSpec); String clusterName = ContentCluster.getClusterId(clusterElem); Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown"); ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, deployState.getProperties(), documentDefinitions, globallyDistributedDocuments, getFlushOnShutdown(flushOnShutdownElem, deployState), combined); ModelElement tuning = clusterElem.childByPath("engine.proton.tuning"); if (tuning != null) { search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml())); } ModelElement protonElem = clusterElem.childByPath("engine.proton"); if (protonElem != null) { search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem)); } buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search); buildIndexedSearchCluster(deployState, clusterElem, clusterName, search); return search; } private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) { if (flushOnShutdownElem != null) { return flushOnShutdownElem; } return ! 
stateIsHosted(deployState); } private Double getQueryTimeout(ModelElement clusterElem) { return clusterElem.childAsDouble("engine.proton.query-timeout"); } private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("streaming".equals(mode)) { buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType); } } } private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) { String docTypeName = docType.stringAttribute("type"); StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName); search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType)); } private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { List<ModelElement> indexedDefs = getIndexedSchemas(clusterElem); if (!indexedDefs.isEmpty()) { IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState); isc.setRoutingSelector(clusterElem.childAsString("documents.selection")); Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay"); if (visibilityDelay != null) { search.setVisibilityDelay(visibilityDelay); } search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs); } } private List<ModelElement> getIndexedSchemas(ModelElement clusterElem) { List<ModelElement> indexedDefs = new ArrayList<>(); ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return indexedDefs; } for (ModelElement docType : 
docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("index".equals(mode)) { indexedDefs.add(docType); } } return indexedDefs; } }
Would be better to put this in a function imho
private void doElementSpecificProcessingOnOverride(List<Element> elements) { elements.forEach(element -> { if (element.getTagName().equals("nodes")) { boolean hasNodeChild = false; for (var child : XML.getChildren(element)) { if (child.getTagName().equals("node")) { hasNodeChild = true; break; } } if (!hasNodeChild) element.setAttribute("required", "true"); } }); }
boolean hasNodeChild = false;
private void doElementSpecificProcessingOnOverride(List<Element> elements) { elements.forEach(element -> { if (element.getTagName().equals("nodes")) if (!hasChildWithTagName(element, "node")) element.setAttribute("required", "true"); }); }
class OverrideProcessor implements PreProcessor { private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName()); private final InstanceName instance; private final Environment environment; private final RegionName region; private static final String ID_ATTRIBUTE = "id"; private static final String INSTANCE_ATTRIBUTE = "instance"; private static final String ENVIRONMENT_ATTRIBUTE = "environment"; private static final String REGION_ATTRIBUTE = "region"; public OverrideProcessor(InstanceName instance, Environment environment, RegionName region) { this.instance = instance; this.environment = environment; this.region = region; } public Document process(Document input) throws TransformerException { log.log(Level.FINE, "Preprocessing overrides with " + environment + "." + region); Document ret = Xml.copyDocument(input); Element root = ret.getDocumentElement(); applyOverrides(root, Context.empty()); return ret; } private void applyOverrides(Element parent, Context context) { context = getParentContext(parent, context); Map<String, List<Element>> elementsByTagName = elementsByTagNameAndId(XML.getChildren(parent)); retainOverriddenElements(elementsByTagName); for (Map.Entry<String, List<Element>> entry : elementsByTagName.entrySet()) { pruneOverrides(parent, entry.getValue(), context); } for (Element child : XML.getChildren(parent)) { applyOverrides(child, context); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); } } private Context getParentContext(Element parent, Context context) { Set<InstanceName> instances = context.instances; Set<Environment> environments = context.environments; Set<RegionName> regions = context.regions; if (instances.isEmpty()) instances = getInstances(parent); if (environments.isEmpty()) environments = getEnvironments(parent); if 
(regions.isEmpty()) regions = getRegions(parent); return Context.create(instances, environments, regions); } /** * Prune overrides from parent according to deploy override rules. * * @param parent parent {@link Element} above children. * @param children children where one {@link Element} will remain as the overriding element * @param context current context with instance, environment and region. */ private void pruneOverrides(Element parent, List<Element> children, Context context) { checkConsistentInheritance(children, context); pruneNonMatching(parent, children); retainMostSpecific(parent, children, context); } private void checkConsistentInheritance(List<Element> children, Context context) { for (Element child : children) { Set<InstanceName> instances = getInstances(child); if ( ! instances.isEmpty() && ! context.instances.isEmpty() && ! context.instances.containsAll(instances)) { throw new IllegalArgumentException("Instances in child (" + instances + ") are not a subset of those of the parent (" + context.instances + ") at " + child); } Set<Environment> environments = getEnvironments(child); if ( ! environments.isEmpty() && ! context.environments.isEmpty() && ! context.environments.containsAll(environments)) { throw new IllegalArgumentException("Environments in child (" + environments + ") are not a subset of those of the parent (" + context.environments + ") at " + child); } Set<RegionName> regions = getRegions(child); if ( ! regions.isEmpty() && ! context.regions.isEmpty() && ! context.regions.containsAll(regions)) { throw new IllegalArgumentException("Regions in child (" + regions + ") are not a subset of those of the parent (" + context.regions + ") at " + child); } } } /** * Prune elements that are not matching our environment and region */ private void pruneNonMatching(Element parent, List<Element> children) { Iterator<Element> elemIt = children.iterator(); while (elemIt.hasNext()) { Element child = elemIt.next(); if ( ! 
matches(getInstances(child), getEnvironments(child), getRegions(child))) { parent.removeChild(child); elemIt.remove(); } } } private boolean matches(Set<InstanceName> elementInstances, Set<Environment> elementEnvironments, Set<RegionName> elementRegions) { if ( ! elementInstances.isEmpty()) { if ( ! elementInstances.contains(instance)) return false; } if ( ! elementEnvironments.isEmpty()) { if ( ! elementEnvironments.contains(environment)) return false; } if ( ! elementRegions.isEmpty()) { if ( environment.isMultiRegion() && ! elementRegions.contains(region)) return false; if ( ! environment.isMultiRegion() && elementEnvironments.isEmpty() ) return false; } return true; } /** * Find the most specific element and remove all others. */ private void retainMostSpecific(Element parent, List<Element> children, Context context) { List<Element> bestMatches = new ArrayList<>(); int bestMatch = 0; for (Element child : children) { bestMatch = updateBestMatches(bestMatches, child, bestMatch, context); } if (bestMatch > 0) { doElementSpecificProcessingOnOverride(bestMatches); for (Element child : children) { if ( ! bestMatches.contains(child)) { parent.removeChild(child); } } } } private int updateBestMatches(List<Element> bestMatches, Element child, int bestMatch, Context context) { int overrideCount = getNumberOfOverrides(child, context); if (overrideCount >= bestMatch) { if (overrideCount > bestMatch) bestMatches.clear(); bestMatches.add(child); return overrideCount; } else { return bestMatch; } } private int getNumberOfOverrides(Element child, Context context) { int currentMatch = 0; Set<InstanceName> elementInstances = hasInstance(child) ? getInstances(child) : context.instances; Set<Environment> elementEnvironments = hasEnvironment(child) ? getEnvironments(child) : context.environments; Set<RegionName> elementRegions = hasRegion(child) ? getRegions(child) : context.regions; if ( ! elementInstances.isEmpty() && elementInstances.contains(instance)) currentMatch++; if ( ! 
elementEnvironments.isEmpty() && elementEnvironments.contains(environment)) currentMatch++; if ( ! elementRegions.isEmpty() && elementRegions.contains(region)) currentMatch++; return currentMatch; } /** Called on each element which is selected by matching some override condition */ /** * Retains all elements where at least one element is overridden. Removes non-overridden elements from map. */ private void retainOverriddenElements(Map<String, List<Element>> elementsByTagName) { Iterator<Map.Entry<String, List<Element>>> it = elementsByTagName.entrySet().iterator(); while (it.hasNext()) { List<Element> elements = it.next().getValue(); boolean hasOverrides = false; for (Element element : elements) { if (hasEnvironment(element) || hasRegion(element)) { hasOverrides = true; } } if (!hasOverrides) { it.remove(); } } } private boolean hasInstance(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); } private boolean hasRegion(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); } private boolean hasEnvironment(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); } private Set<InstanceName> getInstances(Element element) { String instance = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); if (instance == null || instance.isEmpty()) return Collections.emptySet(); return Arrays.stream(instance.split(" ")).map(InstanceName::from).collect(Collectors.toSet()); } private Set<Environment> getEnvironments(Element element) { String env = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); if (env == null || env.isEmpty()) return Collections.emptySet(); return Arrays.stream(env.split(" ")).map(Environment::from).collect(Collectors.toSet()); } private Set<RegionName> getRegions(Element element) { String reg = 
element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); if (reg == null || reg.isEmpty()) return Collections.emptySet(); return Arrays.stream(reg.split(" ")).map(RegionName::from).collect(Collectors.toSet()); } private Map<String, List<Element>> elementsByTagNameAndId(List<Element> children) { Map<String, List<Element>> elementsByTagName = new LinkedHashMap<>(); for (Element child : children) { String key = child.getTagName(); if (child.hasAttribute(ID_ATTRIBUTE)) { key += child.getAttribute(ID_ATTRIBUTE); } if ( ! elementsByTagName.containsKey(key)) { elementsByTagName.put(key, new ArrayList<>()); } elementsByTagName.get(key).add(child); } return elementsByTagName; } private static String getPrintableElement(Element element) { StringBuilder sb = new StringBuilder(element.getTagName()); final NamedNodeMap attributes = element.getAttributes(); for (int i = 0; i < attributes.getLength(); i++) { sb.append(" ").append(attributes.item(i).getNodeName()); } return sb.toString(); } private static String getPrintableElementRecursive(Element element) { StringBuilder sb = new StringBuilder(); sb.append(element.getTagName()); final NamedNodeMap attributes = element.getAttributes(); for (int i = 0; i < attributes.getLength(); i++) { sb.append(" ") .append(attributes.item(i).getNodeName()) .append("=") .append(attributes.item(i).getNodeValue()); } final List<Element> children = XML.getChildren(element); if (children.size() > 0) { sb.append("\n"); for (Element e : children) sb.append("\t").append(getPrintableElementRecursive(e)); } return sb.toString(); } /** * Represents environment and region in a given context. 
*/ private static final class Context { final Set<InstanceName> instances; final Set<Environment> environments; final Set<RegionName> regions; private Context(Set<InstanceName> instances, Set<Environment> environments, Set<RegionName> regions) { this.instances = Set.copyOf(instances); this.environments = Set.copyOf(environments); this.regions = Set.copyOf(regions); } static Context empty() { return new Context(Set.of(), Set.of(), Set.of()); } public static Context create(Set<InstanceName> instances, Set<Environment> environments, Set<RegionName> regions) { return new Context(instances, environments, regions); } } }
class OverrideProcessor implements PreProcessor { private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName()); private final InstanceName instance; private final Environment environment; private final RegionName region; private static final String ID_ATTRIBUTE = "id"; private static final String INSTANCE_ATTRIBUTE = "instance"; private static final String ENVIRONMENT_ATTRIBUTE = "environment"; private static final String REGION_ATTRIBUTE = "region"; public OverrideProcessor(InstanceName instance, Environment environment, RegionName region) { this.instance = instance; this.environment = environment; this.region = region; } public Document process(Document input) throws TransformerException { log.log(Level.FINE, "Preprocessing overrides with " + environment + "." + region); Document ret = Xml.copyDocument(input); Element root = ret.getDocumentElement(); applyOverrides(root, Context.empty()); return ret; } private void applyOverrides(Element parent, Context context) { context = getParentContext(parent, context); Map<String, List<Element>> elementsByTagName = elementsByTagNameAndId(XML.getChildren(parent)); retainOverriddenElements(elementsByTagName); for (Map.Entry<String, List<Element>> entry : elementsByTagName.entrySet()) { pruneOverrides(parent, entry.getValue(), context); } for (Element child : XML.getChildren(parent)) { applyOverrides(child, context); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); } } private Context getParentContext(Element parent, Context context) { Set<InstanceName> instances = context.instances; Set<Environment> environments = context.environments; Set<RegionName> regions = context.regions; if (instances.isEmpty()) instances = getInstances(parent); if (environments.isEmpty()) environments = getEnvironments(parent); if 
(regions.isEmpty()) regions = getRegions(parent); return Context.create(instances, environments, regions); } /** * Prune overrides from parent according to deploy override rules. * * @param parent parent {@link Element} above children. * @param children children where one {@link Element} will remain as the overriding element * @param context current context with instance, environment and region. */ private void pruneOverrides(Element parent, List<Element> children, Context context) { checkConsistentInheritance(children, context); pruneNonMatching(parent, children); retainMostSpecific(parent, children, context); } private void checkConsistentInheritance(List<Element> children, Context context) { for (Element child : children) { Set<InstanceName> instances = getInstances(child); if ( ! instances.isEmpty() && ! context.instances.isEmpty() && ! context.instances.containsAll(instances)) { throw new IllegalArgumentException("Instances in child (" + instances + ") are not a subset of those of the parent (" + context.instances + ") at " + child); } Set<Environment> environments = getEnvironments(child); if ( ! environments.isEmpty() && ! context.environments.isEmpty() && ! context.environments.containsAll(environments)) { throw new IllegalArgumentException("Environments in child (" + environments + ") are not a subset of those of the parent (" + context.environments + ") at " + child); } Set<RegionName> regions = getRegions(child); if ( ! regions.isEmpty() && ! context.regions.isEmpty() && ! context.regions.containsAll(regions)) { throw new IllegalArgumentException("Regions in child (" + regions + ") are not a subset of those of the parent (" + context.regions + ") at " + child); } } } /** * Prune elements that are not matching our environment and region */ private void pruneNonMatching(Element parent, List<Element> children) { Iterator<Element> elemIt = children.iterator(); while (elemIt.hasNext()) { Element child = elemIt.next(); if ( ! 
matches(getInstances(child), getEnvironments(child), getRegions(child))) { parent.removeChild(child); elemIt.remove(); } } } private boolean matches(Set<InstanceName> elementInstances, Set<Environment> elementEnvironments, Set<RegionName> elementRegions) { if ( ! elementInstances.isEmpty()) { if ( ! elementInstances.contains(instance)) return false; } if ( ! elementEnvironments.isEmpty()) { if ( ! elementEnvironments.contains(environment)) return false; } if ( ! elementRegions.isEmpty()) { if ( environment.isMultiRegion() && ! elementRegions.contains(region)) return false; if ( ! environment.isMultiRegion() && elementEnvironments.isEmpty() ) return false; } return true; } /** * Find the most specific element and remove all others. */ private void retainMostSpecific(Element parent, List<Element> children, Context context) { List<Element> bestMatches = new ArrayList<>(); int bestMatch = 0; for (Element child : children) { bestMatch = updateBestMatches(bestMatches, child, bestMatch, context); } if (bestMatch > 0) { doElementSpecificProcessingOnOverride(bestMatches); for (Element child : children) { if ( ! bestMatches.contains(child)) { parent.removeChild(child); } } } } private int updateBestMatches(List<Element> bestMatches, Element child, int bestMatch, Context context) { int overrideCount = getNumberOfOverrides(child, context); if (overrideCount >= bestMatch) { if (overrideCount > bestMatch) bestMatches.clear(); bestMatches.add(child); return overrideCount; } else { return bestMatch; } } private int getNumberOfOverrides(Element child, Context context) { int currentMatch = 0; Set<InstanceName> elementInstances = hasInstance(child) ? getInstances(child) : context.instances; Set<Environment> elementEnvironments = hasEnvironment(child) ? getEnvironments(child) : context.environments; Set<RegionName> elementRegions = hasRegion(child) ? getRegions(child) : context.regions; if ( ! elementInstances.isEmpty() && elementInstances.contains(instance)) currentMatch++; if ( ! 
elementEnvironments.isEmpty() && elementEnvironments.contains(environment)) currentMatch++; if ( ! elementRegions.isEmpty() && elementRegions.contains(region)) currentMatch++; return currentMatch; } /** Called on each element which is selected by matching some override condition */ private static boolean hasChildWithTagName(Element element, String childName) { for (var child : XML.getChildren(element)) { if (child.getTagName().equals(childName)) return true; } return false; } /** * Retains all elements where at least one element is overridden. Removes non-overridden elements from map. */ private void retainOverriddenElements(Map<String, List<Element>> elementsByTagName) { Iterator<Map.Entry<String, List<Element>>> it = elementsByTagName.entrySet().iterator(); while (it.hasNext()) { List<Element> elements = it.next().getValue(); boolean hasOverrides = false; for (Element element : elements) { if (hasEnvironment(element) || hasRegion(element)) { hasOverrides = true; } } if (!hasOverrides) { it.remove(); } } } private boolean hasInstance(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); } private boolean hasRegion(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); } private boolean hasEnvironment(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); } private Set<InstanceName> getInstances(Element element) { String instance = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); if (instance == null || instance.isEmpty()) return Collections.emptySet(); return Arrays.stream(instance.split(" ")).map(InstanceName::from).collect(Collectors.toSet()); } private Set<Environment> getEnvironments(Element element) { String env = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); if (env == null || env.isEmpty()) return Collections.emptySet(); return 
Arrays.stream(env.split(" ")).map(Environment::from).collect(Collectors.toSet()); } private Set<RegionName> getRegions(Element element) { String reg = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); if (reg == null || reg.isEmpty()) return Collections.emptySet(); return Arrays.stream(reg.split(" ")).map(RegionName::from).collect(Collectors.toSet()); } private Map<String, List<Element>> elementsByTagNameAndId(List<Element> children) { Map<String, List<Element>> elementsByTagName = new LinkedHashMap<>(); for (Element child : children) { String key = child.getTagName(); if (child.hasAttribute(ID_ATTRIBUTE)) { key += child.getAttribute(ID_ATTRIBUTE); } if ( ! elementsByTagName.containsKey(key)) { elementsByTagName.put(key, new ArrayList<>()); } elementsByTagName.get(key).add(child); } return elementsByTagName; } private static String getPrintableElement(Element element) { StringBuilder sb = new StringBuilder(element.getTagName()); final NamedNodeMap attributes = element.getAttributes(); for (int i = 0; i < attributes.getLength(); i++) { sb.append(" ").append(attributes.item(i).getNodeName()); } return sb.toString(); } private static String getPrintableElementRecursive(Element element) { StringBuilder sb = new StringBuilder(); sb.append(element.getTagName()); final NamedNodeMap attributes = element.getAttributes(); for (int i = 0; i < attributes.getLength(); i++) { sb.append(" ") .append(attributes.item(i).getNodeName()) .append("=") .append(attributes.item(i).getNodeValue()); } final List<Element> children = XML.getChildren(element); if (children.size() > 0) { sb.append("\n"); for (Element e : children) sb.append("\t").append(getPrintableElementRecursive(e)); } return sb.toString(); } /** * Represents environment and region in a given context. 
*/ private static final class Context { final Set<InstanceName> instances; final Set<Environment> environments; final Set<RegionName> regions; private Context(Set<InstanceName> instances, Set<Environment> environments, Set<RegionName> regions) { this.instances = Set.copyOf(instances); this.environments = Set.copyOf(environments); this.regions = Set.copyOf(regions); } static Context empty() { return new Context(Set.of(), Set.of(), Set.of()); } public static Context create(Set<InstanceName> instances, Set<Environment> environments, Set<RegionName> regions) { return new Context(instances, environments, regions); } } }
Fixed
private void doElementSpecificProcessingOnOverride(List<Element> elements) { elements.forEach(element -> { if (element.getTagName().equals("nodes")) { boolean hasNodeChild = false; for (var child : XML.getChildren(element)) { if (child.getTagName().equals("node")) { hasNodeChild = true; break; } } if (!hasNodeChild) element.setAttribute("required", "true"); } }); }
boolean hasNodeChild = false;
private void doElementSpecificProcessingOnOverride(List<Element> elements) { elements.forEach(element -> { if (element.getTagName().equals("nodes")) if (!hasChildWithTagName(element, "node")) element.setAttribute("required", "true"); }); }
class OverrideProcessor implements PreProcessor { private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName()); private final InstanceName instance; private final Environment environment; private final RegionName region; private static final String ID_ATTRIBUTE = "id"; private static final String INSTANCE_ATTRIBUTE = "instance"; private static final String ENVIRONMENT_ATTRIBUTE = "environment"; private static final String REGION_ATTRIBUTE = "region"; public OverrideProcessor(InstanceName instance, Environment environment, RegionName region) { this.instance = instance; this.environment = environment; this.region = region; } public Document process(Document input) throws TransformerException { log.log(Level.FINE, "Preprocessing overrides with " + environment + "." + region); Document ret = Xml.copyDocument(input); Element root = ret.getDocumentElement(); applyOverrides(root, Context.empty()); return ret; } private void applyOverrides(Element parent, Context context) { context = getParentContext(parent, context); Map<String, List<Element>> elementsByTagName = elementsByTagNameAndId(XML.getChildren(parent)); retainOverriddenElements(elementsByTagName); for (Map.Entry<String, List<Element>> entry : elementsByTagName.entrySet()) { pruneOverrides(parent, entry.getValue(), context); } for (Element child : XML.getChildren(parent)) { applyOverrides(child, context); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); } } private Context getParentContext(Element parent, Context context) { Set<InstanceName> instances = context.instances; Set<Environment> environments = context.environments; Set<RegionName> regions = context.regions; if (instances.isEmpty()) instances = getInstances(parent); if (environments.isEmpty()) environments = getEnvironments(parent); if 
(regions.isEmpty()) regions = getRegions(parent); return Context.create(instances, environments, regions); } /** * Prune overrides from parent according to deploy override rules. * * @param parent parent {@link Element} above children. * @param children children where one {@link Element} will remain as the overriding element * @param context current context with instance, environment and region. */ private void pruneOverrides(Element parent, List<Element> children, Context context) { checkConsistentInheritance(children, context); pruneNonMatching(parent, children); retainMostSpecific(parent, children, context); } private void checkConsistentInheritance(List<Element> children, Context context) { for (Element child : children) { Set<InstanceName> instances = getInstances(child); if ( ! instances.isEmpty() && ! context.instances.isEmpty() && ! context.instances.containsAll(instances)) { throw new IllegalArgumentException("Instances in child (" + instances + ") are not a subset of those of the parent (" + context.instances + ") at " + child); } Set<Environment> environments = getEnvironments(child); if ( ! environments.isEmpty() && ! context.environments.isEmpty() && ! context.environments.containsAll(environments)) { throw new IllegalArgumentException("Environments in child (" + environments + ") are not a subset of those of the parent (" + context.environments + ") at " + child); } Set<RegionName> regions = getRegions(child); if ( ! regions.isEmpty() && ! context.regions.isEmpty() && ! context.regions.containsAll(regions)) { throw new IllegalArgumentException("Regions in child (" + regions + ") are not a subset of those of the parent (" + context.regions + ") at " + child); } } } /** * Prune elements that are not matching our environment and region */ private void pruneNonMatching(Element parent, List<Element> children) { Iterator<Element> elemIt = children.iterator(); while (elemIt.hasNext()) { Element child = elemIt.next(); if ( ! 
matches(getInstances(child), getEnvironments(child), getRegions(child))) { parent.removeChild(child); elemIt.remove(); } } } private boolean matches(Set<InstanceName> elementInstances, Set<Environment> elementEnvironments, Set<RegionName> elementRegions) { if ( ! elementInstances.isEmpty()) { if ( ! elementInstances.contains(instance)) return false; } if ( ! elementEnvironments.isEmpty()) { if ( ! elementEnvironments.contains(environment)) return false; } if ( ! elementRegions.isEmpty()) { if ( environment.isMultiRegion() && ! elementRegions.contains(region)) return false; if ( ! environment.isMultiRegion() && elementEnvironments.isEmpty() ) return false; } return true; } /** * Find the most specific element and remove all others. */ private void retainMostSpecific(Element parent, List<Element> children, Context context) { List<Element> bestMatches = new ArrayList<>(); int bestMatch = 0; for (Element child : children) { bestMatch = updateBestMatches(bestMatches, child, bestMatch, context); } if (bestMatch > 0) { doElementSpecificProcessingOnOverride(bestMatches); for (Element child : children) { if ( ! bestMatches.contains(child)) { parent.removeChild(child); } } } } private int updateBestMatches(List<Element> bestMatches, Element child, int bestMatch, Context context) { int overrideCount = getNumberOfOverrides(child, context); if (overrideCount >= bestMatch) { if (overrideCount > bestMatch) bestMatches.clear(); bestMatches.add(child); return overrideCount; } else { return bestMatch; } } private int getNumberOfOverrides(Element child, Context context) { int currentMatch = 0; Set<InstanceName> elementInstances = hasInstance(child) ? getInstances(child) : context.instances; Set<Environment> elementEnvironments = hasEnvironment(child) ? getEnvironments(child) : context.environments; Set<RegionName> elementRegions = hasRegion(child) ? getRegions(child) : context.regions; if ( ! elementInstances.isEmpty() && elementInstances.contains(instance)) currentMatch++; if ( ! 
elementEnvironments.isEmpty() && elementEnvironments.contains(environment)) currentMatch++; if ( ! elementRegions.isEmpty() && elementRegions.contains(region)) currentMatch++; return currentMatch; } /** Called on each element which is selected by matching some override condition */ /** * Retains all elements where at least one element is overridden. Removes non-overridden elements from map. */ private void retainOverriddenElements(Map<String, List<Element>> elementsByTagName) { Iterator<Map.Entry<String, List<Element>>> it = elementsByTagName.entrySet().iterator(); while (it.hasNext()) { List<Element> elements = it.next().getValue(); boolean hasOverrides = false; for (Element element : elements) { if (hasEnvironment(element) || hasRegion(element)) { hasOverrides = true; } } if (!hasOverrides) { it.remove(); } } } private boolean hasInstance(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); } private boolean hasRegion(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); } private boolean hasEnvironment(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); } private Set<InstanceName> getInstances(Element element) { String instance = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); if (instance == null || instance.isEmpty()) return Collections.emptySet(); return Arrays.stream(instance.split(" ")).map(InstanceName::from).collect(Collectors.toSet()); } private Set<Environment> getEnvironments(Element element) { String env = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); if (env == null || env.isEmpty()) return Collections.emptySet(); return Arrays.stream(env.split(" ")).map(Environment::from).collect(Collectors.toSet()); } private Set<RegionName> getRegions(Element element) { String reg = 
element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); if (reg == null || reg.isEmpty()) return Collections.emptySet(); return Arrays.stream(reg.split(" ")).map(RegionName::from).collect(Collectors.toSet()); } private Map<String, List<Element>> elementsByTagNameAndId(List<Element> children) { Map<String, List<Element>> elementsByTagName = new LinkedHashMap<>(); for (Element child : children) { String key = child.getTagName(); if (child.hasAttribute(ID_ATTRIBUTE)) { key += child.getAttribute(ID_ATTRIBUTE); } if ( ! elementsByTagName.containsKey(key)) { elementsByTagName.put(key, new ArrayList<>()); } elementsByTagName.get(key).add(child); } return elementsByTagName; } private static String getPrintableElement(Element element) { StringBuilder sb = new StringBuilder(element.getTagName()); final NamedNodeMap attributes = element.getAttributes(); for (int i = 0; i < attributes.getLength(); i++) { sb.append(" ").append(attributes.item(i).getNodeName()); } return sb.toString(); } private static String getPrintableElementRecursive(Element element) { StringBuilder sb = new StringBuilder(); sb.append(element.getTagName()); final NamedNodeMap attributes = element.getAttributes(); for (int i = 0; i < attributes.getLength(); i++) { sb.append(" ") .append(attributes.item(i).getNodeName()) .append("=") .append(attributes.item(i).getNodeValue()); } final List<Element> children = XML.getChildren(element); if (children.size() > 0) { sb.append("\n"); for (Element e : children) sb.append("\t").append(getPrintableElementRecursive(e)); } return sb.toString(); } /** * Represents environment and region in a given context. 
*/ private static final class Context { final Set<InstanceName> instances; final Set<Environment> environments; final Set<RegionName> regions; private Context(Set<InstanceName> instances, Set<Environment> environments, Set<RegionName> regions) { this.instances = Set.copyOf(instances); this.environments = Set.copyOf(environments); this.regions = Set.copyOf(regions); } static Context empty() { return new Context(Set.of(), Set.of(), Set.of()); } public static Context create(Set<InstanceName> instances, Set<Environment> environments, Set<RegionName> regions) { return new Context(instances, environments, regions); } } }
class OverrideProcessor implements PreProcessor { private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName()); private final InstanceName instance; private final Environment environment; private final RegionName region; private static final String ID_ATTRIBUTE = "id"; private static final String INSTANCE_ATTRIBUTE = "instance"; private static final String ENVIRONMENT_ATTRIBUTE = "environment"; private static final String REGION_ATTRIBUTE = "region"; public OverrideProcessor(InstanceName instance, Environment environment, RegionName region) { this.instance = instance; this.environment = environment; this.region = region; } public Document process(Document input) throws TransformerException { log.log(Level.FINE, "Preprocessing overrides with " + environment + "." + region); Document ret = Xml.copyDocument(input); Element root = ret.getDocumentElement(); applyOverrides(root, Context.empty()); return ret; } private void applyOverrides(Element parent, Context context) { context = getParentContext(parent, context); Map<String, List<Element>> elementsByTagName = elementsByTagNameAndId(XML.getChildren(parent)); retainOverriddenElements(elementsByTagName); for (Map.Entry<String, List<Element>> entry : elementsByTagName.entrySet()) { pruneOverrides(parent, entry.getValue(), context); } for (Element child : XML.getChildren(parent)) { applyOverrides(child, context); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); } } private Context getParentContext(Element parent, Context context) { Set<InstanceName> instances = context.instances; Set<Environment> environments = context.environments; Set<RegionName> regions = context.regions; if (instances.isEmpty()) instances = getInstances(parent); if (environments.isEmpty()) environments = getEnvironments(parent); if 
(regions.isEmpty()) regions = getRegions(parent); return Context.create(instances, environments, regions); } /** * Prune overrides from parent according to deploy override rules. * * @param parent parent {@link Element} above children. * @param children children where one {@link Element} will remain as the overriding element * @param context current context with instance, environment and region. */ private void pruneOverrides(Element parent, List<Element> children, Context context) { checkConsistentInheritance(children, context); pruneNonMatching(parent, children); retainMostSpecific(parent, children, context); } private void checkConsistentInheritance(List<Element> children, Context context) { for (Element child : children) { Set<InstanceName> instances = getInstances(child); if ( ! instances.isEmpty() && ! context.instances.isEmpty() && ! context.instances.containsAll(instances)) { throw new IllegalArgumentException("Instances in child (" + instances + ") are not a subset of those of the parent (" + context.instances + ") at " + child); } Set<Environment> environments = getEnvironments(child); if ( ! environments.isEmpty() && ! context.environments.isEmpty() && ! context.environments.containsAll(environments)) { throw new IllegalArgumentException("Environments in child (" + environments + ") are not a subset of those of the parent (" + context.environments + ") at " + child); } Set<RegionName> regions = getRegions(child); if ( ! regions.isEmpty() && ! context.regions.isEmpty() && ! context.regions.containsAll(regions)) { throw new IllegalArgumentException("Regions in child (" + regions + ") are not a subset of those of the parent (" + context.regions + ") at " + child); } } } /** * Prune elements that are not matching our environment and region */ private void pruneNonMatching(Element parent, List<Element> children) { Iterator<Element> elemIt = children.iterator(); while (elemIt.hasNext()) { Element child = elemIt.next(); if ( ! 
matches(getInstances(child), getEnvironments(child), getRegions(child))) { parent.removeChild(child); elemIt.remove(); } } } private boolean matches(Set<InstanceName> elementInstances, Set<Environment> elementEnvironments, Set<RegionName> elementRegions) { if ( ! elementInstances.isEmpty()) { if ( ! elementInstances.contains(instance)) return false; } if ( ! elementEnvironments.isEmpty()) { if ( ! elementEnvironments.contains(environment)) return false; } if ( ! elementRegions.isEmpty()) { if ( environment.isMultiRegion() && ! elementRegions.contains(region)) return false; if ( ! environment.isMultiRegion() && elementEnvironments.isEmpty() ) return false; } return true; } /** * Find the most specific element and remove all others. */ private void retainMostSpecific(Element parent, List<Element> children, Context context) { List<Element> bestMatches = new ArrayList<>(); int bestMatch = 0; for (Element child : children) { bestMatch = updateBestMatches(bestMatches, child, bestMatch, context); } if (bestMatch > 0) { doElementSpecificProcessingOnOverride(bestMatches); for (Element child : children) { if ( ! bestMatches.contains(child)) { parent.removeChild(child); } } } } private int updateBestMatches(List<Element> bestMatches, Element child, int bestMatch, Context context) { int overrideCount = getNumberOfOverrides(child, context); if (overrideCount >= bestMatch) { if (overrideCount > bestMatch) bestMatches.clear(); bestMatches.add(child); return overrideCount; } else { return bestMatch; } } private int getNumberOfOverrides(Element child, Context context) { int currentMatch = 0; Set<InstanceName> elementInstances = hasInstance(child) ? getInstances(child) : context.instances; Set<Environment> elementEnvironments = hasEnvironment(child) ? getEnvironments(child) : context.environments; Set<RegionName> elementRegions = hasRegion(child) ? getRegions(child) : context.regions; if ( ! elementInstances.isEmpty() && elementInstances.contains(instance)) currentMatch++; if ( ! 
elementEnvironments.isEmpty() && elementEnvironments.contains(environment)) currentMatch++; if ( ! elementRegions.isEmpty() && elementRegions.contains(region)) currentMatch++; return currentMatch; } /** Called on each element which is selected by matching some override condition */ private static boolean hasChildWithTagName(Element element, String childName) { for (var child : XML.getChildren(element)) { if (child.getTagName().equals(childName)) return true; } return false; } /** * Retains all elements where at least one element is overridden. Removes non-overridden elements from map. */ private void retainOverriddenElements(Map<String, List<Element>> elementsByTagName) { Iterator<Map.Entry<String, List<Element>>> it = elementsByTagName.entrySet().iterator(); while (it.hasNext()) { List<Element> elements = it.next().getValue(); boolean hasOverrides = false; for (Element element : elements) { if (hasEnvironment(element) || hasRegion(element)) { hasOverrides = true; } } if (!hasOverrides) { it.remove(); } } } private boolean hasInstance(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); } private boolean hasRegion(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); } private boolean hasEnvironment(Element element) { return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); } private Set<InstanceName> getInstances(Element element) { String instance = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE); if (instance == null || instance.isEmpty()) return Collections.emptySet(); return Arrays.stream(instance.split(" ")).map(InstanceName::from).collect(Collectors.toSet()); } private Set<Environment> getEnvironments(Element element) { String env = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE); if (env == null || env.isEmpty()) return Collections.emptySet(); return 
Arrays.stream(env.split(" ")).map(Environment::from).collect(Collectors.toSet()); } private Set<RegionName> getRegions(Element element) { String reg = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE); if (reg == null || reg.isEmpty()) return Collections.emptySet(); return Arrays.stream(reg.split(" ")).map(RegionName::from).collect(Collectors.toSet()); } private Map<String, List<Element>> elementsByTagNameAndId(List<Element> children) { Map<String, List<Element>> elementsByTagName = new LinkedHashMap<>(); for (Element child : children) { String key = child.getTagName(); if (child.hasAttribute(ID_ATTRIBUTE)) { key += child.getAttribute(ID_ATTRIBUTE); } if ( ! elementsByTagName.containsKey(key)) { elementsByTagName.put(key, new ArrayList<>()); } elementsByTagName.get(key).add(child); } return elementsByTagName; } private static String getPrintableElement(Element element) { StringBuilder sb = new StringBuilder(element.getTagName()); final NamedNodeMap attributes = element.getAttributes(); for (int i = 0; i < attributes.getLength(); i++) { sb.append(" ").append(attributes.item(i).getNodeName()); } return sb.toString(); } private static String getPrintableElementRecursive(Element element) { StringBuilder sb = new StringBuilder(); sb.append(element.getTagName()); final NamedNodeMap attributes = element.getAttributes(); for (int i = 0; i < attributes.getLength(); i++) { sb.append(" ") .append(attributes.item(i).getNodeName()) .append("=") .append(attributes.item(i).getNodeValue()); } final List<Element> children = XML.getChildren(element); if (children.size() > 0) { sb.append("\n"); for (Element e : children) sb.append("\t").append(getPrintableElementRecursive(e)); } return sb.toString(); } /** * Represents environment and region in a given context. 
*/ private static final class Context { final Set<InstanceName> instances; final Set<Environment> environments; final Set<RegionName> regions; private Context(Set<InstanceName> instances, Set<Environment> environments, Set<RegionName> regions) { this.instances = Set.copyOf(instances); this.environments = Set.copyOf(environments); this.regions = Set.copyOf(regions); } static Context empty() { return new Context(Set.of(), Set.of(), Set.of()); } public static Context create(Set<InstanceName> instances, Set<Environment> environments, Set<RegionName> regions) { return new Context(instances, environments, regions); } } }
Consider creating a separate method (e.g. `alignUp2MiB`) and possibly add a comment indicating why this is done
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { final int twoMB = 0x200000; final int fourK = 0x1000; this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK; }
mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK;
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = alignUp2MiB(properties.mergeChunkSize()); }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
Also, do we need the -4KiB adjustment with today's allocation strategies? The config comment mentions this as being done to avoid hitting the next malloc size class, but I'm not sure if this is relevant now.
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { final int twoMB = 0x200000; final int fourK = 0x1000; this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK; }
mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK;
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = alignUp2MiB(properties.mergeChunkSize()); }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
Perhaps add a comment here on why we do the extra calculation.
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { final int twoMB = 0x200000; final int fourK = 0x1000; this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK; }
mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK;
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = alignUp2MiB(properties.mergeChunkSize()); }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
Shouldn't this return `currentAllocation`? In the case this is meant to fix: 1. cfg1 calculates a new target, `t1`, and deploys with it 2. cfg2 decides that the new target, `t2`, which is not sufficiently different from the current, so it returns empty, which means that target `t1` is cleared 3. On the next deploy, on any cfg: There is no target and the nodes provisioned with `t1` are not yet active, so it will revert back to the previous target (per `NodeRepositoryProvsioner::currentResources`)
private Advice autoscale(Cluster cluster, List<Node> clusterNodes, Limits limits, boolean exclusive) { if (unstable(clusterNodes, nodeRepository)) return Advice.none(); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes, nodeRepository); ClusterTimeseries clusterTimeseries = new ClusterTimeseries(cluster, clusterNodes, metricsDb, nodeRepository); Optional<Double> cpuLoad = clusterTimeseries.averageLoad(Resource.cpu); Optional<Double> memoryLoad = clusterTimeseries.averageLoad(Resource.memory); Optional<Double> diskLoad = clusterTimeseries.averageLoad(Resource.disk); if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) return Advice.none(); var target = ResourceTarget.idealLoad(cpuLoad.get(), memoryLoad.get(), diskLoad.get(), currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Advice.dontScale(); if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale(); return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources()); }
if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale();
private Advice autoscale(Cluster cluster, List<Node> clusterNodes, Limits limits, boolean exclusive) { if (unstable(clusterNodes, nodeRepository)) return Advice.none(); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes, nodeRepository); ClusterTimeseries clusterTimeseries = new ClusterTimeseries(cluster, clusterNodes, metricsDb, nodeRepository); Optional<Double> cpuLoad = clusterTimeseries.averageLoad(Resource.cpu); Optional<Double> memoryLoad = clusterTimeseries.averageLoad(Resource.memory); Optional<Double> diskLoad = clusterTimeseries.averageLoad(Resource.disk); if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) return Advice.none(); var target = ResourceTarget.idealLoad(cpuLoad.get(), memoryLoad.get(), diskLoad.get(), currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Advice.dontScale(); if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale(); return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources()); }
class Autoscaler { /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final MetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice suggest(Cluster cluster, List<Node> clusterNodes) { return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive()); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. 
* * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice autoscale(Cluster cluster, List<Node> clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Advice.none(); return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive()); } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold; } /** The duration of the window we need to consider to make a scaling decision. See also minimumMeasurementsPerNode */ static Duration scalingWindow(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return Duration.ofHours(12); return Duration.ofHours(1); } static Duration maxScalingWindow() { return Duration.ofHours(12); } /** Measurements are currently taken once a minute. 
See also scalingWindow */ static int minimumMeasurementsPerNode(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return 60; return 20; } public static boolean unstable(List<Node> nodes, NodeRepository nodeRepository) { if (nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable())) return true; if (nodeRepository.getNodes(nodes.get(0).allocation().get().owner(), Node.State.reserved).size() > 0) return true; return false; } public static class Advice { private final boolean present; private final Optional<ClusterResources> target; private Advice(Optional<ClusterResources> target, boolean present) { this.target = target; this.present = present; } /** * Returns the autoscaling target that should be set by this advice. * This is empty if the advice is to keep the current allocation. */ public Optional<ClusterResources> target() { return target; } /** True if this does not provide any advice */ public boolean isEmpty() { return ! present; } /** True if this provides advice (which may be to keep the current allocation) */ public boolean isPresent() { return present; } private static Advice none() { return new Advice(Optional.empty(), false); } private static Advice dontScale() { return new Advice(Optional.empty(), true); } private static Advice scaleTo(ClusterResources target) { return new Advice(Optional.of(target), true); } } }
class Autoscaler { /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final MetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice suggest(Cluster cluster, List<Node> clusterNodes) { return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive()); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. 
* * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice autoscale(Cluster cluster, List<Node> clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Advice.none(); return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive()); } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold; } /** The duration of the window we need to consider to make a scaling decision. See also minimumMeasurementsPerNode */ static Duration scalingWindow(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return Duration.ofHours(12); return Duration.ofHours(1); } static Duration maxScalingWindow() { return Duration.ofHours(12); } /** Measurements are currently taken once a minute. 
See also scalingWindow */ static int minimumMeasurementsPerNode(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return 60; return 20; } public static boolean unstable(List<Node> nodes, NodeRepository nodeRepository) { if (nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable())) return true; if (nodeRepository.getNodes(nodes.get(0).allocation().get().owner(), Node.State.reserved).size() > 0) return true; return false; } public static class Advice { private final boolean present; private final Optional<ClusterResources> target; private Advice(Optional<ClusterResources> target, boolean present) { this.target = target; this.present = present; } /** * Returns the autoscaling target that should be set by this advice. * This is empty if the advice is to keep the current allocation. */ public Optional<ClusterResources> target() { return target; } /** True if this does not provide any advice */ public boolean isEmpty() { return ! present; } /** True if this provides advice (which may be to keep the current allocation) */ public boolean isPresent() { return present; } private static Advice none() { return new Advice(Optional.empty(), false); } private static Advice dontScale() { return new Advice(Optional.empty(), true); } private static Advice scaleTo(ClusterResources target) { return new Advice(Optional.of(target), true); } } }
Separated out the 2MB alignment and removed the 4k reservation. Also updated the description.
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { final int twoMB = 0x200000; final int fourK = 0x1000; this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK; }
mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK;
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = alignUp2MiB(properties.mergeChunkSize()); }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
Fixed
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { final int twoMB = 0x200000; final int fourK = 0x1000; this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK; }
mergeChunkSize = ((properties.mergeChunkSize() + (twoMB-1))/twoMB)*twoMB - fourK;
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) { this.numThreads = numThreads; this.cluster = parent; this.reponseNumThreads = properties.defaultNumResponseThreads(); this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType()); useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule(); mergeChunkSize = alignUp2MiB(properties.mergeChunkSize()); }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
class Builder { protected FileStorProducer build(ModelContext.Properties properties, ContentCluster parent, ModelElement clusterElem) { return new FileStorProducer(properties, parent, getThreads(clusterElem)); } private Integer getThreads(ModelElement clusterElem) { ModelElement tuning = clusterElem.child("tuning"); if (tuning == null) { return null; } ModelElement threads = tuning.child("persistence-threads"); if (threads == null) { return null; } Integer count = threads.integerAttribute("count"); if (count != null) { return count; } int numThreads = 0; for (ModelElement thread : threads.subElements("thread")) { count = thread.integerAttribute("count"); numThreads += (count == null) ? 1 : count; } return numThreads; } }
I'd love custom table formatting to make the durations easily comparable, but I'm probably weird o_O
public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.hostRepairMaintainer = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); }
this.containerImageExpirer = duration(2, HOURS);
public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(5, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(1, MINUTES); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(10, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.hostRepairMaintainer = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); }
class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration hostRepairMaintainer; private final Duration containerImageExpirer; private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } }
class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration hostRepairMaintainer; private final Duration containerImageExpirer; private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } }
Strange variable type for a store-only set ;)
public List<NewDocumentType> getDocumentTypesWithStoreOnly() { List<NewDocumentType> indexedDocTypes = new ArrayList<>(); for (NewDocumentType type : documentDefinitions.values()) { if (findStreamingCluster(type.getFullName().getName()).isEmpty() && (hasIndexedCluster() && !getIndexed().hasDocumentDB(type.getFullName().getName()) || !hasIndexedCluster())) { indexedDocTypes.add(type); } } return indexedDocTypes; }
List<NewDocumentType> indexedDocTypes = new ArrayList<>();
public List<NewDocumentType> getDocumentTypesWithStoreOnly() { return documentTypes(this::hasIndexingModeStoreOnly); }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> { private final Map<String, NewDocumentType> documentDefinitions; private final Set<NewDocumentType> globallyDistributedDocuments; private final boolean combined; public Builder(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments, boolean combined) { this.documentDefinitions = documentDefinitions; this.globallyDistributedDocuments = globallyDistributedDocuments; this.combined = combined; } @Override protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { ModelElement clusterElem = new ModelElement(producerSpec); String clusterName = ContentCluster.getClusterId(clusterElem); Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown"); ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, deployState.getProperties(), documentDefinitions, globallyDistributedDocuments, getFlushOnShutdown(flushOnShutdownElem, deployState), combined); ModelElement tuning = clusterElem.childByPath("engine.proton.tuning"); if (tuning != null) { search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml())); } ModelElement protonElem = clusterElem.childByPath("engine.proton"); if (protonElem != null) { search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem)); } buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search); buildIndexedSearchCluster(deployState, clusterElem, clusterName, search); return search; } private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) { if (flushOnShutdownElem != null) { return flushOnShutdownElem; } return ! 
stateIsHosted(deployState); } private Double getQueryTimeout(ModelElement clusterElem) { return clusterElem.childAsDouble("engine.proton.query-timeout"); } private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("streaming".equals(mode)) { buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType); } } } private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) { String docTypeName = docType.stringAttribute("type"); StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName); search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType)); } private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { List<ModelElement> indexedDefs = getIndexedSchemas(clusterElem); if (!indexedDefs.isEmpty()) { IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState); isc.setRoutingSelector(clusterElem.childAsString("documents.selection")); Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay"); if (visibilityDelay != null) { search.setVisibilityDelay(visibilityDelay); } search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs); } } private List<ModelElement> getIndexedSchemas(ModelElement clusterElem) { List<ModelElement> indexedDefs = new ArrayList<>(); ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return indexedDefs; } for (ModelElement docType : 
docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("index".equals(mode)) { indexedDefs.add(docType); } } return indexedDefs; } }
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> { private final Map<String, NewDocumentType> documentDefinitions; private final Set<NewDocumentType> globallyDistributedDocuments; private final boolean combined; public Builder(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments, boolean combined) { this.documentDefinitions = documentDefinitions; this.globallyDistributedDocuments = globallyDistributedDocuments; this.combined = combined; } @Override protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { ModelElement clusterElem = new ModelElement(producerSpec); String clusterName = ContentCluster.getClusterId(clusterElem); Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown"); ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, deployState.getProperties(), documentDefinitions, globallyDistributedDocuments, getFlushOnShutdown(flushOnShutdownElem, deployState), combined); ModelElement tuning = clusterElem.childByPath("engine.proton.tuning"); if (tuning != null) { search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml())); } ModelElement protonElem = clusterElem.childByPath("engine.proton"); if (protonElem != null) { search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem)); } buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search); buildIndexedSearchCluster(deployState, clusterElem, clusterName, search); return search; } private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) { if (flushOnShutdownElem != null) { return flushOnShutdownElem; } return ! 
stateIsHosted(deployState); } private Double getQueryTimeout(ModelElement clusterElem) { return clusterElem.childAsDouble("engine.proton.query-timeout"); } private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("streaming".equals(mode)) { buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType); } } } private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) { String docTypeName = docType.stringAttribute("type"); StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName); search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType)); } private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) { List<ModelElement> indexedDefs = getIndexedSchemas(clusterElem); if (!indexedDefs.isEmpty()) { IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState); isc.setRoutingSelector(clusterElem.childAsString("documents.selection")); Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay"); if (visibilityDelay != null) { search.setVisibilityDelay(visibilityDelay); } search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs); } } private List<ModelElement> getIndexedSchemas(ModelElement clusterElem) { List<ModelElement> indexedDefs = new ArrayList<>(); ModelElement docElem = clusterElem.child("documents"); if (docElem == null) { return indexedDefs; } for (ModelElement docType : 
docElem.subElements("document")) { String mode = docType.stringAttribute("mode"); if ("index".equals(mode)) { indexedDefs.add(docType); } } return indexedDefs; } }
Current isn't the current target but the current allocation (active nodes).
private Advice autoscale(Cluster cluster, List<Node> clusterNodes, Limits limits, boolean exclusive) { if (unstable(clusterNodes, nodeRepository)) return Advice.none(); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes, nodeRepository); ClusterTimeseries clusterTimeseries = new ClusterTimeseries(cluster, clusterNodes, metricsDb, nodeRepository); Optional<Double> cpuLoad = clusterTimeseries.averageLoad(Resource.cpu); Optional<Double> memoryLoad = clusterTimeseries.averageLoad(Resource.memory); Optional<Double> diskLoad = clusterTimeseries.averageLoad(Resource.disk); if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) return Advice.none(); var target = ResourceTarget.idealLoad(cpuLoad.get(), memoryLoad.get(), diskLoad.get(), currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Advice.dontScale(); if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale(); return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources()); }
if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale();
private Advice autoscale(Cluster cluster, List<Node> clusterNodes, Limits limits, boolean exclusive) { if (unstable(clusterNodes, nodeRepository)) return Advice.none(); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes, nodeRepository); ClusterTimeseries clusterTimeseries = new ClusterTimeseries(cluster, clusterNodes, metricsDb, nodeRepository); Optional<Double> cpuLoad = clusterTimeseries.averageLoad(Resource.cpu); Optional<Double> memoryLoad = clusterTimeseries.averageLoad(Resource.memory); Optional<Double> diskLoad = clusterTimeseries.averageLoad(Resource.disk); if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) return Advice.none(); var target = ResourceTarget.idealLoad(cpuLoad.get(), memoryLoad.get(), diskLoad.get(), currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Advice.dontScale(); if (similar(bestAllocation.get(), currentAllocation)) return Advice.dontScale(); return Advice.scaleTo(bestAllocation.get().toAdvertisedClusterResources()); }
class Autoscaler { /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final MetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice suggest(Cluster cluster, List<Node> clusterNodes) { return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive()); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. 
* * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice autoscale(Cluster cluster, List<Node> clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Advice.none(); return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive()); } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold; } /** The duration of the window we need to consider to make a scaling decision. See also minimumMeasurementsPerNode */ static Duration scalingWindow(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return Duration.ofHours(12); return Duration.ofHours(1); } static Duration maxScalingWindow() { return Duration.ofHours(12); } /** Measurements are currently taken once a minute. 
See also scalingWindow */ static int minimumMeasurementsPerNode(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return 60; return 20; } public static boolean unstable(List<Node> nodes, NodeRepository nodeRepository) { if (nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable())) return true; if (nodeRepository.getNodes(nodes.get(0).allocation().get().owner(), Node.State.reserved).size() > 0) return true; return false; } public static class Advice { private final boolean present; private final Optional<ClusterResources> target; private Advice(Optional<ClusterResources> target, boolean present) { this.target = target; this.present = present; } /** * Returns the autoscaling target that should be set by this advice. * This is empty if the advice is to keep the current allocation. */ public Optional<ClusterResources> target() { return target; } /** True if this does not provide any advice */ public boolean isEmpty() { return ! present; } /** True if this provides advice (which may be to keep the current allocation) */ public boolean isPresent() { return present; } private static Advice none() { return new Advice(Optional.empty(), false); } private static Advice dontScale() { return new Advice(Optional.empty(), true); } private static Advice scaleTo(ClusterResources target) { return new Advice(Optional.of(target), true); } } }
class Autoscaler { /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final MetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(MetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice suggest(Cluster cluster, List<Node> clusterNodes) { return autoscale(cluster, clusterNodes, Limits.empty(), cluster.exclusive()); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. 
* * @param clusterNodes the list of all the active nodes in a cluster * @return scaling advice for this cluster */ public Advice autoscale(Cluster cluster, List<Node> clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Advice.none(); return autoscale(cluster, clusterNodes, Limits.of(cluster), cluster.exclusive()); } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / (( r1 + r2) / 2) < threshold; } /** The duration of the window we need to consider to make a scaling decision. See also minimumMeasurementsPerNode */ static Duration scalingWindow(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return Duration.ofHours(12); return Duration.ofHours(1); } static Duration maxScalingWindow() { return Duration.ofHours(12); } /** Measurements are currently taken once a minute. 
See also scalingWindow */ static int minimumMeasurementsPerNode(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return 60; return 20; } public static boolean unstable(List<Node> nodes, NodeRepository nodeRepository) { if (nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable())) return true; if (nodeRepository.getNodes(nodes.get(0).allocation().get().owner(), Node.State.reserved).size() > 0) return true; return false; } public static class Advice { private final boolean present; private final Optional<ClusterResources> target; private Advice(Optional<ClusterResources> target, boolean present) { this.target = target; this.present = present; } /** * Returns the autoscaling target that should be set by this advice. * This is empty if the advice is to keep the current allocation. */ public Optional<ClusterResources> target() { return target; } /** True if this does not provide any advice */ public boolean isEmpty() { return ! present; } /** True if this provides advice (which may be to keep the current allocation) */ public boolean isPresent() { return present; } private static Advice none() { return new Advice(Optional.empty(), false); } private static Advice dontScale() { return new Advice(Optional.empty(), true); } private static Advice scaleTo(ClusterResources target) { return new Advice(Optional.of(target), true); } } }
This will intentionally happen for _all the things_ the first time this all rolls out to existing deployments, so no reprocessing will start immediately.
public void reindex() throws ReindexingLockException { try (Lock lock = database.lockReindexing()) { Reindexing reindexing = database.readReindexing(); for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) { log.log(WARNING, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); } else { Status status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); reindexing = reindexing.with(type, progress(type, status)); } if (Thread.interrupted()) break; } database.writeReindexing(reindexing); } }
public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); try (Lock lock = database.lockReindexing()) { for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type); if (phaser.isTerminated()) break; } } }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ @SuppressWarnings("fallthrough") private Status progress(DocumentType type, Status status) { if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return status; case RUNNING: log.log(WARNING, "Unepxected state 'RUNNING' of reindexing of " + type); case READY: } status = status.running(); VisitorControlHandler control = new VisitorControlHandler(); visit(type, status.progress().orElse(null), control); if (control.getProgress() != null) status = status.progressed(control.getProgress()); CompletionCode code = control.getResult() != null ? 
control.getResult().getCode() : ABORTED; switch (code) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); return status.failed(clock.instant(), control.getResult().getMessage()); case ABORTED: log.log(FINE, () -> "Aborting reindexing of " + type + " due to shutdown — will continue later"); return status.halted(); case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); return status.successful(clock.instant()); } } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } try { control.waitUntilDone(); } catch (InterruptedException e) { control.abort(); Thread.currentThread().interrupt(); } session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return 
"[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Lets the reindexere abort any ongoing visit session, wait for it to complete normally, then exit. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ @SuppressWarnings("fallthrough") private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { 
super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static 
class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
This should not be logged as warning
/**
 * Reprocesses all document types whose readiness instant has passed, updating stored
 * reindexing status along the way. Stops early when the thread is interrupted.
 *
 * @throws ReindexingLockException if reindexing is already under way elsewhere
 */
public void reindex() throws ReindexingLockException {
    try (Lock lock = database.lockReindexing()) {
        Reindexing reindexing = database.readReindexing();
        for (DocumentType type : ready.keySet()) {
            if (ready.get(type).isAfter(clock.instant())) {
                // A readiness instant in the future is benign — config is issued for "now",
                // so this type will simply be processed on a later run. INFO, not WARNING.
                log.log(INFO, "Received config for reindexing which is ready in the future — will process later " +
                              "(" + ready.get(type) + " is after " + clock.instant() + ")");
            } else {
                Status status = reindexing.status().getOrDefault(type,
                                                                 Status.ready(clock.instant())
                                                                       .running()
                                                                       .successful(clock.instant()));
                reindexing = reindexing.with(type, progress(type, status));
            }
            if (Thread.interrupted()) // Clears the interrupt flag; stop promptly when interrupted.
                break;
        }
        database.writeReindexing(reindexing);
    }
}
log.log(WARNING, "Received config for reindexing which is ready in the future — will process later " +
/**
 * Runs reindexing of every document type whose readiness instant has passed, and stores
 * the resulting status. Refuses to run after shutdown, and stops between types when
 * shutdown happens mid-run.
 *
 * @throws ReindexingLockException if reindexing is already under way elsewhere
 */
public void reindex() throws ReindexingLockException {
    if (phaser.isTerminated())
        throw new IllegalStateException("Already shut down");

    try (Lock lock = database.lockReindexing()) {
        for (Map.Entry<DocumentType, Instant> entry : ready.entrySet()) {
            DocumentType type = entry.getKey();
            Instant readyAt = entry.getValue();
            if (readyAt.isAfter(clock.instant()))
                log.log(INFO, "Received config for reindexing which is ready in the future — will process later " +
                        "(" + readyAt + " is after " + clock.instant() + ")");
            else
                progress(type);
            if (phaser.isTerminated()) // Shut down while processing — stop before the next type.
                break;
        }
    }
}
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ @SuppressWarnings("fallthrough") private Status progress(DocumentType type, Status status) { if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return status; case RUNNING: log.log(WARNING, "Unepxected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type + ", which started at " + status.startedAt()); } status = status.running(); VisitorControlHandler control = new VisitorControlHandler(); visit(type, status.progress().orElse(null), control); if (control.getProgress() != null) status = status.progressed(control.getProgress()); CompletionCode code = control.getResult() != null ? 
control.getResult().getCode() : ABORTED; switch (code) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); return status.failed(clock.instant(), control.getResult().getMessage()); case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); return status.halted(); case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); return status.successful(clock.instant()); } } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } try { control.waitUntilDone(); } catch (InterruptedException e) { control.abort(); Thread.currentThread().interrupt(); } session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return 
"[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Lets the reindexere abort any ongoing visit session, wait for it to complete normally, then exit. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ @SuppressWarnings("fallthrough") private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { 
super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static 
class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
I don't know ... it shouldn't happen. We only send out config for "now", and those "now"s should be a little in the past.
/**
 * Reprocesses all document types whose readiness instant has passed, updating stored
 * reindexing status along the way. Stops early when the thread is interrupted.
 *
 * @throws ReindexingLockException if reindexing is already under way elsewhere
 */
public void reindex() throws ReindexingLockException {
    try (Lock lock = database.lockReindexing()) {
        Reindexing reindexing = database.readReindexing();
        for (DocumentType type : ready.keySet()) {
            if (ready.get(type).isAfter(clock.instant())) {
                // A readiness instant in the future is benign — config is issued for "now",
                // so this type will simply be processed on a later run. INFO, not WARNING.
                log.log(INFO, "Received config for reindexing which is ready in the future — will process later " +
                              "(" + ready.get(type) + " is after " + clock.instant() + ")");
            } else {
                Status status = reindexing.status().getOrDefault(type,
                                                                 Status.ready(clock.instant())
                                                                       .running()
                                                                       .successful(clock.instant()));
                reindexing = reindexing.with(type, progress(type, status));
            }
            if (Thread.interrupted()) // Clears the interrupt flag; stop promptly when interrupted.
                break;
        }
        database.writeReindexing(reindexing);
    }
}
log.log(WARNING, "Received config for reindexing which is ready in the future — will process later " +
/**
 * Runs reindexing of every document type whose readiness instant has passed, and stores
 * the resulting status. Refuses to run after shutdown, and stops between types when
 * shutdown happens mid-run.
 *
 * @throws ReindexingLockException if reindexing is already under way elsewhere
 */
public void reindex() throws ReindexingLockException {
    if (phaser.isTerminated())
        throw new IllegalStateException("Already shut down");

    try (Lock lock = database.lockReindexing()) {
        for (Map.Entry<DocumentType, Instant> entry : ready.entrySet()) {
            DocumentType type = entry.getKey();
            Instant readyAt = entry.getValue();
            if (readyAt.isAfter(clock.instant()))
                log.log(INFO, "Received config for reindexing which is ready in the future — will process later " +
                        "(" + readyAt + " is after " + clock.instant() + ")");
            else
                progress(type);
            if (phaser.isTerminated()) // Shut down while processing — stop before the next type.
                break;
        }
    }
}
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ @SuppressWarnings("fallthrough") private Status progress(DocumentType type, Status status) { if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return status; case RUNNING: log.log(WARNING, "Unepxected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type + ", which started at " + status.startedAt()); } status = status.running(); VisitorControlHandler control = new VisitorControlHandler(); visit(type, status.progress().orElse(null), control); if (control.getProgress() != null) status = status.progressed(control.getProgress()); CompletionCode code = control.getResult() != null ? 
control.getResult().getCode() : ABORTED; switch (code) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); return status.failed(clock.instant(), control.getResult().getMessage()); case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); return status.halted(); case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); return status.successful(clock.instant()); } } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } try { control.waitUntilDone(); } catch (InterruptedException e) { control.abort(); Thread.currentThread().interrupt(); } session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return 
"[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Lets the reindexere abort any ongoing visit session, wait for it to complete normally, then exit. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ @SuppressWarnings("fallthrough") private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { 
super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static 
class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
Can go with INFO.
/**
 * Reprocesses all document types whose readiness instant has passed, updating stored
 * reindexing status along the way. Stops early when the thread is interrupted.
 *
 * @throws ReindexingLockException if reindexing is already under way elsewhere
 */
public void reindex() throws ReindexingLockException {
    try (Lock lock = database.lockReindexing()) {
        Reindexing reindexing = database.readReindexing();
        for (DocumentType type : ready.keySet()) {
            if (ready.get(type).isAfter(clock.instant())) {
                // A readiness instant in the future is benign — config is issued for "now",
                // so this type will simply be processed on a later run. INFO, not WARNING.
                log.log(INFO, "Received config for reindexing which is ready in the future — will process later " +
                              "(" + ready.get(type) + " is after " + clock.instant() + ")");
            } else {
                Status status = reindexing.status().getOrDefault(type,
                                                                 Status.ready(clock.instant())
                                                                       .running()
                                                                       .successful(clock.instant()));
                reindexing = reindexing.with(type, progress(type, status));
            }
            if (Thread.interrupted()) // Clears the interrupt flag; stop promptly when interrupted.
                break;
        }
        database.writeReindexing(reindexing);
    }
}
log.log(WARNING, "Received config for reindexing which is ready in the future — will process later " +
/**
 * Runs reindexing of every document type whose readiness instant has passed, and stores
 * the resulting status. Refuses to run after shutdown, and stops between types when
 * shutdown happens mid-run.
 *
 * @throws ReindexingLockException if reindexing is already under way elsewhere
 */
public void reindex() throws ReindexingLockException {
    if (phaser.isTerminated())
        throw new IllegalStateException("Already shut down");

    try (Lock lock = database.lockReindexing()) {
        for (Map.Entry<DocumentType, Instant> entry : ready.entrySet()) {
            DocumentType type = entry.getKey();
            Instant readyAt = entry.getValue();
            if (readyAt.isAfter(clock.instant()))
                log.log(INFO, "Received config for reindexing which is ready in the future — will process later " +
                        "(" + readyAt + " is after " + clock.instant() + ")");
            else
                progress(type);
            if (phaser.isTerminated()) // Shut down while processing — stop before the next type.
                break;
        }
    }
}
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ @SuppressWarnings("fallthrough") private Status progress(DocumentType type, Status status) { if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return status; case RUNNING: log.log(WARNING, "Unepxected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type + ", which started at " + status.startedAt()); } status = status.running(); VisitorControlHandler control = new VisitorControlHandler(); visit(type, status.progress().orElse(null), control); if (control.getProgress() != null) status = status.progressed(control.getProgress()); CompletionCode code = control.getResult() != null ? 
control.getResult().getCode() : ABORTED; switch (code) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); return status.failed(clock.instant(), control.getResult().getMessage()); case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); return status.halted(); case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); return status.successful(clock.instant()); } } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } try { control.waitUntilDone(); } catch (InterruptedException e) { control.abort(); Thread.currentThread().interrupt(); } session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return 
"[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Lets the reindexere abort any ongoing visit session, wait for it to complete normally, then exit. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ @SuppressWarnings("fallthrough") private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { 
super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static 
class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
I presume this is a synchronous write to ZooKeeper? _If_ it's observed to be a performance issue, it could be useful to consider making progress updates async since this is called in the context of a visitor client worker thread callback.
private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), 
clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); }
database.writeReindexing(reindexing = reindexing.with(type, status));
private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), 
clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Tells this to stop reindexing at its leisure. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); try (Lock lock = database.lockReindexing()) { for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type); if (phaser.isTerminated()) break; } } } @SuppressWarnings("fallthrough") private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); 
parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Lets the reindexere abort any ongoing visit session, wait for it to complete normally, then exit. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); try (Lock lock = database.lockReindexing()) { for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type); if (phaser.isTerminated()) break; } } } @SuppressWarnings("fallthrough") private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters 
= new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
For those of us not too familiar with these newfangled and fancy `Phaser` doohickies, a comment on how this ends up transitively aborting the active visitor session would be nice 🦖
public void shutdown() { phaser.forceTermination(); }
phaser.forceTermination();
public void shutdown() { phaser.forceTermination(); }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Tells this to stop reindexing at its leisure. */ /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); try (Lock lock = database.lockReindexing()) { for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type); if (phaser.isTerminated()) break; } } } @SuppressWarnings("fallthrough") private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: 
log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters 
parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Lets the reindexere abort any ongoing visit session, wait for it to complete normally, then exit. */ /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); try (Lock lock = database.lockReindexing()) { for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type); if (phaser.isTerminated()) break; } } } @SuppressWarnings("fallthrough") private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous 
failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType 
type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
Noted.
private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), 
clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); }
database.writeReindexing(reindexing = reindexing.with(type, status));
private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), 
clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Tells this to stop reindexing at its leisure. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); try (Lock lock = database.lockReindexing()) { for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type); if (phaser.isTerminated()) break; } } } @SuppressWarnings("fallthrough") private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); 
parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Lets the reindexere abort any ongoing visit session, wait for it to complete normally, then exit. */ public void shutdown() { phaser.forceTermination(); } /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); try (Lock lock = database.lockReindexing()) { for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type); if (phaser.isTerminated()) break; } } } @SuppressWarnings("fallthrough") private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters parameters 
= new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
Of course!
public void shutdown() { phaser.forceTermination(); }
phaser.forceTermination();
public void shutdown() { phaser.forceTermination(); }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Tells this to stop reindexing at its leisure. */ /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); try (Lock lock = database.lockReindexing()) { for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type); if (phaser.isTerminated()) break; } } } @SuppressWarnings("fallthrough") private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous failure"); case SUCCESSFUL: return; case RUNNING: 
log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType type, ProgressToken progress) { VisitorParameters 
parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
class Reindexer { private static final Logger log = Logger.getLogger(Reindexer.class.getName()); private final Cluster cluster; private final Map<DocumentType, Instant> ready; private final ReindexingCurator database; private final DocumentAccess access; private final Clock clock; private final Phaser phaser = new Phaser(2); private Reindexing reindexing; private Status status; public Reindexer(Cluster cluster, Map<DocumentType, Instant> ready, ReindexingCurator database, DocumentAccess access, Clock clock) { for (DocumentType type : ready.keySet()) cluster.bucketSpaceOf(type); this.cluster = cluster; this.ready = new TreeMap<>(ready); this.database = database; this.access = access; this.clock = clock; } /** Lets the reindexere abort any ongoing visit session, wait for it to complete normally, then exit. */ /** Starts and tracks reprocessing of ready document types until done, or interrupted. */ public void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); try (Lock lock = database.lockReindexing()) { for (DocumentType type : ready.keySet()) { if (ready.get(type).isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + ready.get(type) + " is after " + clock.instant() + ")"); else progress(type); if (phaser.isTerminated()) break; } } } @SuppressWarnings("fallthrough") private void progress(DocumentType type) { reindexing = database.readReindexing(); status = reindexing.status().getOrDefault(type, Status.ready(clock.instant()) .running() .successful(clock.instant())); if (ready.get(type).isAfter(status.startedAt())) status = Status.ready(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); switch (status.state()) { default: log.log(WARNING, "Unknown reindexing state '" + status.state() + "'"); case FAILED: log.log(FINE, () -> "Not continuing reindexing of " + type + " due to previous 
failure"); case SUCCESSFUL: return; case RUNNING: log.log(WARNING, "Unexpected state 'RUNNING' of reindexing of " + type); case READY: log.log(FINE, () -> "Running reindexing of " + type); } status = status.running(); AtomicReference<Instant> progressLastStored = new AtomicReference<>(clock.instant()); VisitorControlHandler control = new VisitorControlHandler() { @Override public void onProgress(ProgressToken token) { super.onProgress(token); status = status.progressed(token); if (progressLastStored.get().isBefore(clock.instant().minusSeconds(10))) { progressLastStored.set(clock.instant()); database.writeReindexing(reindexing = reindexing.with(type, status)); } } @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); phaser.arriveAndAwaitAdvance(); } }; visit(type, status.progress().orElse(null), control); switch (control.getResult().getCode()) { default: log.log(WARNING, "Unexpected visitor result '" + control.getResult().getCode() + "'"); case FAILURE: log.log(WARNING, "Visiting failed: " + control.getResult().getMessage()); status = status.failed(clock.instant(), control.getResult().getMessage()); break; case ABORTED: log.log(FINE, () -> "Halting reindexing of " + type + " due to shutdown — will continue later"); status = status.halted(); break; case SUCCESS: log.log(INFO, "Completed reindexing of " + type + " after " + Duration.between(status.startedAt(), clock.instant())); status = status.successful(clock.instant()); } database.writeReindexing(reindexing.with(type, status)); } private void visit(DocumentType type, ProgressToken progress, VisitorControlHandler control) { VisitorParameters parameters = createParameters(type, progress); parameters.setControlHandler(control); VisitorSession session; try { session = access.createVisitorSession(parameters); } catch (ParseException e) { throw new IllegalStateException(e); } phaser.arriveAndAwaitAdvance(); session.destroy(); } VisitorParameters createParameters(DocumentType 
type, ProgressToken progress) { VisitorParameters parameters = new VisitorParameters(type.getName()); parameters.setRemoteDataHandler(cluster.name()); parameters.setResumeToken(progress); parameters.setFieldSet(type.getName() + ":[document]"); parameters.setPriority(DocumentProtocol.Priority.LOW_1); parameters.setRoute(cluster.route()); parameters.setBucketSpace(cluster.bucketSpaceOf(type)); return parameters; } static class Cluster { private final String name; private final String configId; private final Map<DocumentType, String> documentBuckets; Cluster(String name, String configId, Map<DocumentType, String> documentBuckets) { this.name = requireNonNull(name); this.configId = requireNonNull(configId); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Storage:cluster=" + name + ";clusterconfigid=" + configId + "]"; } String bucketSpaceOf(DocumentType documentType) { return requireNonNull(documentBuckets.get(documentType), "Unknown bucket space for " + documentType); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Cluster cluster = (Cluster) o; return name.equals(cluster.name) && configId.equals(cluster.configId) && documentBuckets.equals(cluster.documentBuckets); } @Override public int hashCode() { return Objects.hash(name, configId, documentBuckets); } @Override public String toString() { return "Cluster{" + "name='" + name + '\'' + ", configId='" + configId + '\'' + ", documentBuckets=" + documentBuckets + '}'; } } }
Maybe this is too low compared to the request timeouts?
private static CloseableHttpClient createHttpClient() { return VespaHttpClientBuilder .create() .setUserAgent("config-convergence-checker") .setConnectionTimeToLive(20, TimeUnit.SECONDS) .setMaxConnPerRoute(4) .setMaxConnTotal(100) .setDefaultRequestConfig(createRequestConfig(Duration.ofSeconds(10))) .build(); }
.setConnectionTimeToLive(20, TimeUnit.SECONDS)
private static CloseableHttpClient createHttpClient() { return VespaHttpClientBuilder .create() .setUserAgent("config-convergence-checker") .setConnectionTimeToLive(20, TimeUnit.SECONDS) .setMaxConnPerRoute(4) .setMaxConnTotal(100) .setDefaultRequestConfig(createRequestConfig(Duration.ofSeconds(10))) .build(); }
class ConfigConvergenceChecker extends AbstractComponent { private static final Logger log = Logger.getLogger(ConfigConvergenceChecker.class.getName()); private final static Set<String> serviceTypesToCheck = Set.of( CONTAINER.serviceName, QRSERVER.serviceName, LOGSERVER_CONTAINER.serviceName, CLUSTERCONTROLLER_CONTAINER.serviceName, "searchnode", "storagenode", "distributor" ); private final CloseableHttpClient httpClient; private final ObjectMapper jsonMapper = new ObjectMapper(); @Inject public ConfigConvergenceChecker() { this.httpClient = createHttpClient(); } /** Fetches the active config generation for all services in the given application. */ public Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService) { List<ServiceInfo> servicesToCheck = new ArrayList<>(); application.getModel().getHosts() .forEach(host -> host.getServices().stream() .filter(service -> serviceTypesToCheck.contains(service.getServiceType())) .forEach(service -> getStatePort(service).ifPresent(port -> servicesToCheck.add(service)))); return getServiceGenerations(servicesToCheck, timeoutPerService); } /** Check all services in given application. 
Returns the minimum current generation of all services */ public JSONResponse getServiceConfigGenerationsResponse(Application application, URI requestUrl, Duration timeoutPerService) { Map<ServiceInfo, Long> currentGenerations = getServiceConfigGenerations(application, timeoutPerService); long currentGeneration = currentGenerations.values().stream().mapToLong(Long::longValue).min().orElse(-1); return new ServiceListResponse(200, currentGenerations, requestUrl, application.getApplicationGeneration(), currentGeneration); } /** Check service identified by host and port in given application */ public JSONResponse getServiceConfigGenerationResponse(Application application, String hostAndPortToCheck, URI requestUrl, Duration timeout) { Long wantedGeneration = application.getApplicationGeneration(); try { if ( ! hostInApplication(application, hostAndPortToCheck)) return ServiceResponse.createHostNotFoundInAppResponse(requestUrl, hostAndPortToCheck, wantedGeneration); long currentGeneration = getServiceGeneration(URI.create("http: boolean converged = currentGeneration >= wantedGeneration; return ServiceResponse.createOkResponse(requestUrl, hostAndPortToCheck, wantedGeneration, currentGeneration, converged); } catch (NonSuccessStatusCodeException | IOException e) { return ServiceResponse.createNotFoundResponse(requestUrl, hostAndPortToCheck, wantedGeneration, e.getMessage()); } catch (Exception e) { return ServiceResponse.createErrorResponse(requestUrl, hostAndPortToCheck, wantedGeneration, e.getMessage()); } } @Override public void deconstruct() { try { httpClient.close(); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Gets service generation for a list of services (in parallel). 
*/ private Map<ServiceInfo, Long> getServiceGenerations(List<ServiceInfo> services, Duration timeout) { return services.parallelStream() .collect(Collectors.toMap( service -> service, service -> { try { return getServiceGeneration(URI.create("http: + ":" + getStatePort(service).get()), timeout); } catch (IOException | NonSuccessStatusCodeException e) { return -1L; } }, (v1, v2) -> { throw new IllegalStateException("Duplicate keys for values '" + v1 + "' and '" + v2 + "'."); }, LinkedHashMap::new )); } /** Get service generation of service at given URL */ private long getServiceGeneration(URI serviceUrl, Duration timeout) throws IOException, NonSuccessStatusCodeException { HttpGet request = new HttpGet(createApiUri(serviceUrl)); request.setConfig(createRequestConfig(timeout)); try (CloseableHttpResponse response = httpClient.execute(request)) { int statusCode = response.getStatusLine().getStatusCode(); if (statusCode != HttpStatus.SC_OK) throw new NonSuccessStatusCodeException(statusCode); if (response.getEntity() == null) throw new IOException("Response has no content"); JsonNode jsonContent = jsonMapper.readTree(response.getEntity().getContent()); return generationFromContainerState(jsonContent); } catch (Exception e) { log.log( LogLevel.DEBUG, e, () -> String.format("Failed to retrieve service config generation for '%s': %s", serviceUrl, e.getMessage())); throw e; } } private boolean hostInApplication(Application application, String hostPort) { for (HostInfo host : application.getModel().getHosts()) { if (hostPort.startsWith(host.getHostname())) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (hostPort.equals(host.getHostname() + ":" + port.getPort())) { return true; } } } } } return false; } private static Optional<Integer> getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findFirst(); } private static long 
generationFromContainerState(JsonNode state) { return state.get("config").get("generation").asLong(-1); } private static URI createApiUri(URI serviceUrl) { try { return new URIBuilder(serviceUrl) .setPath("/state/v1/config") .build(); } catch (URISyntaxException e) { throw new RuntimeException(e); } } private static RequestConfig createRequestConfig(Duration timeout) { int timeoutMillis = (int)timeout.toMillis(); return RequestConfig.custom() .setConnectionRequestTimeout(timeoutMillis) .setConnectTimeout(timeoutMillis) .setSocketTimeout(timeoutMillis) .build(); } private static class NonSuccessStatusCodeException extends Exception { final int statusCode; NonSuccessStatusCodeException(int statusCode) { super("Expected status code 200, got " + statusCode); this.statusCode = statusCode; } } private static class ServiceListResponse extends JSONResponse { private ServiceListResponse(int status, Map<ServiceInfo, Long> servicesToCheck, URI uri, long wantedGeneration, long currentGeneration) { super(status); Cursor serviceArray = object.setArray("services"); servicesToCheck.forEach((service, generation) -> { Cursor serviceObject = serviceArray.addObject(); String hostName = service.getHostName(); int statePort = getStatePort(service).get(); serviceObject.setString("host", hostName); serviceObject.setLong("port", statePort); serviceObject.setString("type", service.getServiceType()); serviceObject.setString("url", uri.toString() + "/" + hostName + ":" + statePort); serviceObject.setLong("currentGeneration", generation); }); object.setString("url", uri.toString()); object.setLong("currentGeneration", currentGeneration); object.setLong("wantedGeneration", wantedGeneration); object.setBool("converged", currentGeneration >= wantedGeneration); } } private static class ServiceResponse extends JSONResponse { private ServiceResponse(int status, URI uri, String hostname, Long wantedGeneration) { super(status); object.setString("url", uri.toString()); object.setString("host", 
hostname); object.setLong("wantedGeneration", wantedGeneration); } static ServiceResponse createOkResponse(URI uri, String hostname, Long wantedGeneration, Long currentGeneration, boolean converged) { ServiceResponse serviceResponse = new ServiceResponse(200, uri, hostname, wantedGeneration); serviceResponse.object.setBool("converged", converged); serviceResponse.object.setLong("currentGeneration", currentGeneration); return serviceResponse; } static ServiceResponse createHostNotFoundInAppResponse(URI uri, String hostname, Long wantedGeneration) { ServiceResponse serviceResponse = new ServiceResponse(410, uri, hostname, wantedGeneration); serviceResponse.object.setString("problem", "Host:port (service) no longer part of application, refetch list of services."); return serviceResponse; } static ServiceResponse createErrorResponse(URI uri, String hostname, Long wantedGeneration, String error) { ServiceResponse serviceResponse = new ServiceResponse(500, uri, hostname, wantedGeneration); serviceResponse.object.setString("error", error); return serviceResponse; } static ServiceResponse createNotFoundResponse(URI uri, String hostname, Long wantedGeneration, String error) { ServiceResponse serviceResponse = new ServiceResponse(404, uri, hostname, wantedGeneration); serviceResponse.object.setString("error", error); return serviceResponse; } } }
// Checks whether the services of a deployed application have converged on the
// application's wanted config generation. For each service with a "state" port
// it issues an HTTP GET to the service's /state/v1/config endpoint (in parallel,
// via parallelStream) and reads the "config.generation" field of the JSON reply;
// a service that fails the request is reported with generation -1.
// Responses are rendered as JSON via the nested ServiceListResponse/ServiceResponse
// classes (200 OK, 404 not found, 410 host no longer in application, 500 error).
// NOTE(review): several string literals below (e.g. URI.create("http: ...) appear
// truncated by extraction — restore them from the original file before compiling.
class ConfigConvergenceChecker extends AbstractComponent { private static final Logger log = Logger.getLogger(ConfigConvergenceChecker.class.getName()); private final static Set<String> serviceTypesToCheck = Set.of( CONTAINER.serviceName, QRSERVER.serviceName, LOGSERVER_CONTAINER.serviceName, CLUSTERCONTROLLER_CONTAINER.serviceName, "searchnode", "storagenode", "distributor" ); private final CloseableHttpClient httpClient; private final ObjectMapper jsonMapper = new ObjectMapper(); @Inject public ConfigConvergenceChecker() { this.httpClient = createHttpClient(); } /** Fetches the active config generation for all services in the given application. */ public Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService) { List<ServiceInfo> servicesToCheck = new ArrayList<>(); application.getModel().getHosts() .forEach(host -> host.getServices().stream() .filter(service -> serviceTypesToCheck.contains(service.getServiceType())) .forEach(service -> getStatePort(service).ifPresent(port -> servicesToCheck.add(service)))); return getServiceGenerations(servicesToCheck, timeoutPerService); } /** Check all services in given application. 
Returns the minimum current generation of all services */ public JSONResponse getServiceConfigGenerationsResponse(Application application, URI requestUrl, Duration timeoutPerService) { Map<ServiceInfo, Long> currentGenerations = getServiceConfigGenerations(application, timeoutPerService); long currentGeneration = currentGenerations.values().stream().mapToLong(Long::longValue).min().orElse(-1); return new ServiceListResponse(200, currentGenerations, requestUrl, application.getApplicationGeneration(), currentGeneration); } /** Check service identified by host and port in given application */ public JSONResponse getServiceConfigGenerationResponse(Application application, String hostAndPortToCheck, URI requestUrl, Duration timeout) { Long wantedGeneration = application.getApplicationGeneration(); try { if ( ! hostInApplication(application, hostAndPortToCheck)) return ServiceResponse.createHostNotFoundInAppResponse(requestUrl, hostAndPortToCheck, wantedGeneration); long currentGeneration = getServiceGeneration(URI.create("http: boolean converged = currentGeneration >= wantedGeneration; return ServiceResponse.createOkResponse(requestUrl, hostAndPortToCheck, wantedGeneration, currentGeneration, converged); } catch (NonSuccessStatusCodeException | IOException e) { return ServiceResponse.createNotFoundResponse(requestUrl, hostAndPortToCheck, wantedGeneration, e.getMessage()); } catch (Exception e) { return ServiceResponse.createErrorResponse(requestUrl, hostAndPortToCheck, wantedGeneration, e.getMessage()); } } @Override public void deconstruct() { try { httpClient.close(); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Gets service generation for a list of services (in parallel). 
*/ private Map<ServiceInfo, Long> getServiceGenerations(List<ServiceInfo> services, Duration timeout) { return services.parallelStream() .collect(Collectors.toMap( service -> service, service -> { try { return getServiceGeneration(URI.create("http: + ":" + getStatePort(service).get()), timeout); } catch (IOException | NonSuccessStatusCodeException e) { return -1L; } }, (v1, v2) -> { throw new IllegalStateException("Duplicate keys for values '" + v1 + "' and '" + v2 + "'."); }, LinkedHashMap::new )); } /** Get service generation of service at given URL */ private long getServiceGeneration(URI serviceUrl, Duration timeout) throws IOException, NonSuccessStatusCodeException { HttpGet request = new HttpGet(createApiUri(serviceUrl)); request.setConfig(createRequestConfig(timeout)); try (CloseableHttpResponse response = httpClient.execute(request)) { int statusCode = response.getStatusLine().getStatusCode(); if (statusCode != HttpStatus.SC_OK) throw new NonSuccessStatusCodeException(statusCode); if (response.getEntity() == null) throw new IOException("Response has no content"); JsonNode jsonContent = jsonMapper.readTree(response.getEntity().getContent()); return generationFromContainerState(jsonContent); } catch (Exception e) { log.log( LogLevel.DEBUG, e, () -> String.format("Failed to retrieve service config generation for '%s': %s", serviceUrl, e.getMessage())); throw e; } } private boolean hostInApplication(Application application, String hostPort) { for (HostInfo host : application.getModel().getHosts()) { if (hostPort.startsWith(host.getHostname())) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (hostPort.equals(host.getHostname() + ":" + port.getPort())) { return true; } } } } } return false; } private static Optional<Integer> getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findFirst(); } private static long 
generationFromContainerState(JsonNode state) { return state.get("config").get("generation").asLong(-1); } private static URI createApiUri(URI serviceUrl) { try { return new URIBuilder(serviceUrl) .setPath("/state/v1/config") .build(); } catch (URISyntaxException e) { throw new RuntimeException(e); } } private static RequestConfig createRequestConfig(Duration timeout) { int timeoutMillis = (int)timeout.toMillis(); return RequestConfig.custom() .setConnectionRequestTimeout(timeoutMillis) .setConnectTimeout(timeoutMillis) .setSocketTimeout(timeoutMillis) .build(); } private static class NonSuccessStatusCodeException extends Exception { final int statusCode; NonSuccessStatusCodeException(int statusCode) { super("Expected status code 200, got " + statusCode); this.statusCode = statusCode; } } private static class ServiceListResponse extends JSONResponse { private ServiceListResponse(int status, Map<ServiceInfo, Long> servicesToCheck, URI uri, long wantedGeneration, long currentGeneration) { super(status); Cursor serviceArray = object.setArray("services"); servicesToCheck.forEach((service, generation) -> { Cursor serviceObject = serviceArray.addObject(); String hostName = service.getHostName(); int statePort = getStatePort(service).get(); serviceObject.setString("host", hostName); serviceObject.setLong("port", statePort); serviceObject.setString("type", service.getServiceType()); serviceObject.setString("url", uri.toString() + "/" + hostName + ":" + statePort); serviceObject.setLong("currentGeneration", generation); }); object.setString("url", uri.toString()); object.setLong("currentGeneration", currentGeneration); object.setLong("wantedGeneration", wantedGeneration); object.setBool("converged", currentGeneration >= wantedGeneration); } } private static class ServiceResponse extends JSONResponse { private ServiceResponse(int status, URI uri, String hostname, Long wantedGeneration) { super(status); object.setString("url", uri.toString()); object.setString("host", 
hostname); object.setLong("wantedGeneration", wantedGeneration); } static ServiceResponse createOkResponse(URI uri, String hostname, Long wantedGeneration, Long currentGeneration, boolean converged) { ServiceResponse serviceResponse = new ServiceResponse(200, uri, hostname, wantedGeneration); serviceResponse.object.setBool("converged", converged); serviceResponse.object.setLong("currentGeneration", currentGeneration); return serviceResponse; } static ServiceResponse createHostNotFoundInAppResponse(URI uri, String hostname, Long wantedGeneration) { ServiceResponse serviceResponse = new ServiceResponse(410, uri, hostname, wantedGeneration); serviceResponse.object.setString("problem", "Host:port (service) no longer part of application, refetch list of services."); return serviceResponse; } static ServiceResponse createErrorResponse(URI uri, String hostname, Long wantedGeneration, String error) { ServiceResponse serviceResponse = new ServiceResponse(500, uri, hostname, wantedGeneration); serviceResponse.object.setString("error", error); return serviceResponse; } static ServiceResponse createNotFoundResponse(URI uri, String hostname, Long wantedGeneration, String error) { ServiceResponse serviceResponse = new ServiceResponse(404, uri, hostname, wantedGeneration); serviceResponse.object.setString("error", error); return serviceResponse; } } }
That eliminates the need for nullity checks.
/**
 * Fills in the reindexing config for this content cluster. When no reindexing
 * state is present, or reindexing is disabled, only the cluster name and
 * enabled=false are emitted; otherwise enabled=true is set and the ready
 * timestamp is added for every document type with a known reindexing status.
 */
public void getConfig(ReindexingConfig.Builder builder) {
    builder.clusterName(contentClusterName);
    boolean active = reindexing != null && reindexing.enabled();
    builder.enabled(active);
    if ( ! active) return; // nothing more to report when reindexing is off
    for (NewDocumentType documentType : documentTypes) {
        String typeName = documentType.getFullName().getName();
        reindexing.status(contentClusterName, typeName)
                  .ifPresent(status -> builder.status(typeName,
                                                      new ReindexingConfig.Status.Builder()
                                                              .readyAtMillis(status.ready().toEpochMilli())));
    }
}
if (reindexing == null || !reindexing.enabled()) {
/**
 * Fills in the reindexing config for this content cluster: the cluster name,
 * whether reindexing is enabled, and the ready timestamp for each document
 * type that has a known reindexing status.
 */
public void getConfig(ReindexingConfig.Builder builder) {
    builder.clusterName(contentClusterName);
    builder.enabled(reindexing.enabled());
    documentTypes.forEach(type -> {
        String typeName = type.getFullName().getName();
        reindexing.status(contentClusterName, typeName)
                  .ifPresent(status -> builder.status(typeName,
                                                      new ReindexingConfig.Status.Builder()
                                                              .readyAtMillis(status.ready().toEpochMilli())));
    });
}
// Model component that installs the "reindexing-maintainer" container component
// (from the clustercontroller-reindexer bundle) and produces ReindexingConfig
// for one content cluster.
// NOTE(review): 'reindexing' is resolved via context.reindexing().orElse(null),
// so any consumer of that field must null-check. The dangling '@Override' before
// the closing brace suggests the produced getConfig(...) method was elided from
// this extract — confirm against the full source.
class ReindexingController extends SimpleComponent implements ReindexingConfig.Producer { static final String REINDEXING_CONTROLLER_BUNDLE = "clustercontroller-reindexer"; private final Reindexing reindexing; private final String contentClusterName; private final Collection<NewDocumentType> documentTypes; ReindexingController(ReindexingContext context) { super(new ComponentModel( BundleInstantiationSpecification.getFromStrings( "reindexing-maintainer", "ai.vespa.reindexing.ReindexingMaintainer", REINDEXING_CONTROLLER_BUNDLE))); this.reindexing = context.reindexing().orElse(null); this.contentClusterName = context.contentClusterName(); this.documentTypes = context.documentTypes(); } @Override }
// Model component that installs the "reindexing-maintainer" container component
// (from the clustercontroller-reindexer bundle) and produces ReindexingConfig
// for one content cluster. 'reindexing' is taken directly from the context
// (no Optional unwrapping here).
// NOTE(review): the dangling '@Override' before the closing brace suggests the
// produced getConfig(...) method was elided from this extract — confirm against
// the full source.
class ReindexingController extends SimpleComponent implements ReindexingConfig.Producer { static final String REINDEXING_CONTROLLER_BUNDLE = "clustercontroller-reindexer"; private final Reindexing reindexing; private final String contentClusterName; private final Collection<NewDocumentType> documentTypes; ReindexingController(ReindexingContext context) { super(new ComponentModel( BundleInstantiationSpecification.getFromStrings( "reindexing-maintainer", "ai.vespa.reindexing.ReindexingMaintainer", REINDEXING_CONTROLLER_BUNDLE))); this.reindexing = context.reindexing(); this.contentClusterName = context.contentClusterName(); this.documentTypes = context.documentTypes(); } @Override }
Oh yeah.
/**
 * Sends a /reindexing request for the given application with the given HTTP
 * method, asserts a 200 response and, when {@code expectedBody} is non-null,
 * renders the response and asserts it matches the expected JSON.
 */
private void reindexing(ApplicationId application, Method method, String expectedBody) throws IOException {
    String reindexingUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindexing";
    HttpResponse response = createApplicationHandler().handle(createTestRequest(reindexingUrl, method));
    assertEquals(200, response.getStatus());
    if (expectedBody != null) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        response.render(out);
        // Fixed: removed leftover debug print of the rendered body to stderr.
        assertJsonEquals(out.toString(), expectedBody);
    }
}
System.err.println(out);
/**
 * Sends a /reindexing request for the given application with the given HTTP
 * method, asserts a 200 response and, when {@code expectedBody} is non-null,
 * renders the response and asserts it matches the expected JSON.
 */
private void reindexing(ApplicationId application, Method method, String expectedBody) throws IOException {
    String url = toUrlPath(application, Zone.defaultZone(), true) + "/reindexing";
    HttpResponse response = createApplicationHandler().handle(createTestRequest(url, method));
    assertEquals(200, response.getStatus());
    if (expectedBody == null) return; // caller only wanted the status check
    ByteArrayOutputStream rendered = new ByteArrayOutputStream();
    response.render(rendered);
    assertJsonEquals(rendered.toString(), expectedBody);
}
// Unit tests for the config server's application REST handler: deployment and
// deletion, quota, reindexing control (/reindex, /reindexing), restart, suspend,
// convergence, cluster controller status proxying, log retrieval, tester
// endpoints and file distribution status. Uses an in-process TenantRepository /
// ApplicationRepository with mock provisioner, orchestrator, tester client and
// log retriever, plus a ManualClock for deterministic timestamps.
// NOTE(review): several string literals below (e.g. String url = "http: ...)
// appear truncated by extraction — restore them from the original file before
// compiling.
class ApplicationHandlerTest { private static final File testApp = new File("src/test/apps/app"); private final static TenantName mytenantName = TenantName.from("mytenant"); private final static ApplicationId myTenantApplicationId = ApplicationId.from(mytenantName, ApplicationName.defaultName(), InstanceName.defaultName()); private final static ApplicationId applicationId = ApplicationId.from(TenantName.defaultName(), ApplicationName.defaultName(), InstanceName.defaultName()); private final static MockTesterClient testerClient = new MockTesterClient(); private static final MockLogRetriever logRetriever = new MockLogRetriever(); private static final Version vespaVersion = Version.fromString("7.8.9"); private TenantRepository tenantRepository; private ApplicationRepository applicationRepository; private MockProvisioner provisioner; private OrchestratorMock orchestrator; private ManualClock clock; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() throws IOException { clock = new ManualClock(); List<ModelFactory> modelFactories = List.of(DeployTester.createModelFactory(vespaVersion)); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder().getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder().getAbsolutePath()) .fileReferencesDir(temporaryFolder.newFolder().getAbsolutePath()) .build(); TestComponentRegistry componentRegistry = new TestComponentRegistry.Builder() .provisioner(provisioner) .modelFactoryRegistry(new ModelFactoryRegistry(modelFactories)) .configServerConfig(configserverConfig) .clock(clock) .build(); tenantRepository = new TenantRepository(componentRegistry); tenantRepository.addTenant(mytenantName); provisioner = new MockProvisioner(); orchestrator = new OrchestratorMock(); applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withProvisioner(provisioner) 
.withOrchestrator(orchestrator) .withClock(componentRegistry.getClock()) .withTesterClient(testerClient) .withLogRetriever(logRetriever) .withConfigserverConfig(configserverConfig) .build(); } @After public void shutdown() { tenantRepository.close(); } @Test public void testDelete() throws Exception { TenantName foobar = TenantName.from("foobar"); tenantRepository.addTenant(foobar); { applicationRepository.deploy(testApp, prepareParams(applicationId)); Tenant mytenant = applicationRepository.getTenant(applicationId); deleteAndAssertOKResponse(mytenant, applicationId); } { applicationRepository.deploy(testApp, prepareParams(applicationId)); deleteAndAssertOKResponseMocked(applicationId, true); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationId fooId = new ApplicationId.Builder() .tenant(foobar) .applicationName("foo") .instanceName("quux") .build(); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); applicationRepository.deploy(testApp, prepareParams2); assertApplicationExists(fooId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(fooId, true); assertApplicationExists(applicationId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(applicationId, true); } { ApplicationId baliId = new ApplicationId.Builder() .tenant(mytenantName) .applicationName("bali") .instanceName("quux") .build(); PrepareParams prepareParamsBali = new PrepareParams.Builder().applicationId(baliId).build(); applicationRepository.deploy(testApp, prepareParamsBali); deleteAndAssertOKResponseMocked(baliId, true); } } @Test public void testDeleteNonExistent() throws Exception { deleteAndAssertResponse(myTenantApplicationId, Zone.defaultZone(), Response.Status.NOT_FOUND, HttpErrorResponse.errorCodes.NOT_FOUND, "Unable to delete mytenant.default.default: Not found"); } @Test public void testGet() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) 
.vespaVersion(vespaVersion) .build(); long sessionId = applicationRepository.deploy(testApp, prepareParams).sessionId(); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, true, vespaVersion); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, false, vespaVersion); } @Test public void testGetQuota() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) .vespaVersion(vespaVersion) .build(); applicationRepository.deploy(testApp, prepareParams).sessionId(); var url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/quota"; var response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); var renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"rate\":0.0}", renderedString); } @Test public void testReindex() throws Exception { ApplicationCuratorDatabase database = applicationRepository.getTenant(applicationId).getApplicationRepo().database(); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationReindexing expected = ApplicationReindexing.ready(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, ""); expected = expected.withReady(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?cluster="); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?type=moo"); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo"); expected = expected.withReady("foo", 
clock.instant()) .withReady("boo", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo&type=bar,baz"); expected = expected.withReady("foo", "bar", clock.instant()) .withReady("foo", "baz", clock.instant()) .withReady("boo", "bar", clock.instant()) .withReady("boo", "baz", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, DELETE); expected = expected.enabled(false); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, POST); expected = expected.enabled(true); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); long now = clock.instant().toEpochMilli(); reindexing(applicationId, GET, "{" + " \"enabled\": true," + " \"status\": {" + " \"readyMillis\": " + (now - 2000) + " }," + " \"clusters\": [" + " {" + " \"name\": \"boo\"," + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": []," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }," + " {" + " \"name\": \"foo\", " + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": []," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }" + " ]" + "}"); } @Test public void testRestart() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertFalse(provisioner.restarted()); restart(applicationId, Zone.defaultZone()); assertTrue(provisioner.restarted()); assertEquals(applicationId, provisioner.lastApplicationId()); } @Test public void testSuspended() throws Exception { applicationRepository.deploy(testApp, 
prepareParams(applicationId)); assertSuspended(false, applicationId, Zone.defaultZone()); orchestrator.suspend(applicationId); assertSuspended(true, applicationId, Zone.defaultZone()); } @Test public void testConverge() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); converge(applicationId, Zone.defaultZone()); } @Test public void testClusterControllerStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); String host = "foo.yahoo.com"; String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/clustercontroller/" + host + "/status/v1/clusterName1"; HttpProxy mockHttpProxy = mock(HttpProxy.class); ApplicationRepository applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withHostProvisionerProvider(HostProvisionerProvider.empty()) .withOrchestrator(orchestrator) .withTesterClient(testerClient) .withHttpProxy(mockHttpProxy) .build(); ApplicationHandler mockHandler = createApplicationHandler(applicationRepository); when(mockHttpProxy.get(any(), eq(host), eq(CLUSTERCONTROLLER_CONTAINER.serviceName),eq("clustercontroller-status/v1/clusterName1"))) .thenReturn(new StaticResponse(200, "text/html", "<html>...</html>")); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertHttpStatusCodeAndMessage(response, 200, "text/html", "<html>...</html>"); } @Test public void testPutIsIllegal() throws IOException { assertNotAllowed(Method.PUT); } @Test public void testFileDistributionStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); Zone zone = Zone.defaultZone(); HttpResponse response = fileDistributionStatus(applicationId, zone); assertEquals(200, response.getStatus()); assertEquals("{\"hosts\":[{\"hostname\":\"mytesthost\",\"status\":\"UNKNOWN\",\"message\":\"error: Connection error(104)\",\"fileReferences\":[]}],\"status\":\"UNKNOWN\"}", getRenderedString(response)); 
ApplicationId unknown = new ApplicationId.Builder().applicationName("unknown").tenant("default").build(); HttpResponse responseForUnknown = fileDistributionStatus(unknown, zone); assertEquals(404, responseForUnknown.getStatus()); assertEquals("{\"error-code\":\"NOT_FOUND\",\"message\":\"Unknown application id 'default.unknown'\"}", getRenderedString(responseForUnknown)); } @Test public void testGetLogs() throws IOException { applicationRepository.deploy(new File("src/test/apps/app-logserver-with-container"), prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/logs?from=100&to=200"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log line", getRenderedString(response)); } @Test public void testTesterStatus() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/status"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("OK", getRenderedString(response)); } @Test public void testTesterGetLog() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/log?after=1234"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log", getRenderedString(response)); } @Test public void testTesterStartTests() { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/run/staging-test"; ApplicationHandler mockHandler = 
createApplicationHandler(); InputStream requestData = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); HttpRequest testRequest = createTestRequest(url, POST, requestData); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testTesterReady() { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/ready"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testGetTestReport() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/report"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); assertEquals("report", getRenderedString(response)); } private void assertNotAllowed(Method method) throws IOException { String url = "http: deleteAndAssertResponse(url, Response.Status.METHOD_NOT_ALLOWED, HttpErrorResponse.errorCodes.METHOD_NOT_ALLOWED, "{\"error-code\":\"METHOD_NOT_ALLOWED\",\"message\":\"Method '" + method + "' is not supported\"}", method); } private void deleteAndAssertOKResponseMocked(ApplicationId applicationId, boolean fullAppIdInUrl) throws IOException { Tenant tenant = applicationRepository.getTenant(applicationId); long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, Zone.defaultZone(), Response.Status.OK, null, fullAppIdInUrl); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertOKResponse(Tenant tenant, ApplicationId 
applicationId) throws IOException { long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, Zone.defaultZone(), Response.Status.OK, null, true); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, boolean fullAppIdInUrl) throws IOException { String expectedResponse = "{\"message\":\"Application '" + applicationId + "' deleted\"}"; deleteAndAssertResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse) throws IOException { deleteAndAssertResponse(toUrlPath(applicationId, zone, true), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(String url, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse, Method method) throws IOException { ApplicationHandler handler = createApplicationHandler(); HttpResponse response = handler.handle(createTestRequest(url, method)); if (expectedStatus == 200) { assertHttpStatusCodeAndMessage(response, 200, expectedResponse); } else { HandlerTest.assertHttpStatusCodeErrorCodeAndMessage(response, expectedStatus, errorCode, expectedResponse); } } private void assertApplicationResponse(ApplicationId applicationId, Zone zone, long expectedGeneration, boolean fullAppIdInUrl, Version expectedVersion) throws IOException { assertApplicationResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedGeneration, expectedVersion); } private void assertSuspended(boolean expectedValue, ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/suspended"; HttpResponse 
response = createApplicationHandler().handle(createTestRequest(restartUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, "{\"suspended\":" + expectedValue + "}"); } private String toUrlPath(ApplicationId application, Zone zone, boolean fullAppIdInUrl) { String url = "http: if (fullAppIdInUrl) url = url + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + application.instance().value(); return url; } private void assertApplicationResponse(String url, long expectedGeneration, Version expectedVersion) throws IOException { HttpResponse response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); String renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"generation\":" + expectedGeneration + ",\"applicationPackageFileReference\":\"./\"" + ",\"modelVersions\":[\"" + expectedVersion.toFullString() + "\"]}", renderedString); } private void assertApplicationExists(ApplicationId applicationId, Zone zone) throws IOException { String tenantName = applicationId.tenant().value(); String expected = "[\"http: tenantName + "/application/" + applicationId.application().value() + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + applicationId.instance().value() + "\"]"; ListApplicationsHandler listApplicationsHandler = new ListApplicationsHandler(ListApplicationsHandler.testOnlyContext(), tenantRepository, Zone.defaultZone()); ListApplicationsHandlerTest.assertResponse(listApplicationsHandler, "http: Response.Status.OK, expected, GET); } private void reindexing(ApplicationId application, Method method) throws IOException { reindexing(application, method, null); } private void reindex(ApplicationId application, String query) throws IOException { String reindexUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindex" + query; 
assertHttpStatusCodeAndMessage(createApplicationHandler().handle(createTestRequest(reindexUrl, POST)), 200, ""); } private void restart(ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/restart"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, POST)); assertHttpStatusCodeAndMessage(response, 200, ""); } private void converge(ApplicationId application, Zone zone) throws IOException { String convergeUrl = toUrlPath(application, zone, true) + "/serviceconverge"; HttpResponse response = createApplicationHandler().handle(createTestRequest(convergeUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, ""); } private HttpResponse fileDistributionStatus(ApplicationId application, Zone zone) { String restartUrl = toUrlPath(application, zone, true) + "/filedistributionstatus"; return createApplicationHandler().handle(createTestRequest(restartUrl, GET)); } private static class MockStateApiFactory implements ConfigConvergenceChecker.StateApiFactory { boolean createdApi = false; @Override public ConfigConvergenceChecker.StateApi createStateApi(Client client, URI serviceUri) { createdApi = true; return () -> { try { return new ObjectMapper().readTree("{\"config\":{\"generation\":1}}"); } catch (IOException e) { throw new RuntimeException(e); } }; } } private ApplicationHandler createApplicationHandler() { return createApplicationHandler(applicationRepository); } private ApplicationHandler createApplicationHandler(ApplicationRepository applicationRepository) { return new ApplicationHandler(ApplicationHandler.testOnlyContext(), Zone.defaultZone(), applicationRepository); } private PrepareParams prepareParams(ApplicationId applicationId) { return new PrepareParams.Builder().applicationId(applicationId).build(); } }
class ApplicationHandlerTest { private static final File testApp = new File("src/test/apps/app"); private final static TenantName mytenantName = TenantName.from("mytenant"); private final static ApplicationId myTenantApplicationId = ApplicationId.from(mytenantName, ApplicationName.defaultName(), InstanceName.defaultName()); private final static ApplicationId applicationId = ApplicationId.from(TenantName.defaultName(), ApplicationName.defaultName(), InstanceName.defaultName()); private final static MockTesterClient testerClient = new MockTesterClient(); private static final MockLogRetriever logRetriever = new MockLogRetriever(); private static final Version vespaVersion = Version.fromString("7.8.9"); private TenantRepository tenantRepository; private ApplicationRepository applicationRepository; private MockProvisioner provisioner; private OrchestratorMock orchestrator; private ManualClock clock; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() throws IOException { clock = new ManualClock(); List<ModelFactory> modelFactories = List.of(DeployTester.createModelFactory(vespaVersion)); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder().getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder().getAbsolutePath()) .fileReferencesDir(temporaryFolder.newFolder().getAbsolutePath()) .build(); TestComponentRegistry componentRegistry = new TestComponentRegistry.Builder() .provisioner(provisioner) .modelFactoryRegistry(new ModelFactoryRegistry(modelFactories)) .configServerConfig(configserverConfig) .clock(clock) .build(); tenantRepository = new TenantRepository(componentRegistry); tenantRepository.addTenant(mytenantName); provisioner = new MockProvisioner(); orchestrator = new OrchestratorMock(); applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withProvisioner(provisioner) 
.withOrchestrator(orchestrator) .withClock(componentRegistry.getClock()) .withTesterClient(testerClient) .withLogRetriever(logRetriever) .withConfigserverConfig(configserverConfig) .build(); } @After public void shutdown() { tenantRepository.close(); } @Test public void testDelete() throws Exception { TenantName foobar = TenantName.from("foobar"); tenantRepository.addTenant(foobar); { applicationRepository.deploy(testApp, prepareParams(applicationId)); Tenant mytenant = applicationRepository.getTenant(applicationId); deleteAndAssertOKResponse(mytenant, applicationId); } { applicationRepository.deploy(testApp, prepareParams(applicationId)); deleteAndAssertOKResponseMocked(applicationId, true); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationId fooId = new ApplicationId.Builder() .tenant(foobar) .applicationName("foo") .instanceName("quux") .build(); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); applicationRepository.deploy(testApp, prepareParams2); assertApplicationExists(fooId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(fooId, true); assertApplicationExists(applicationId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(applicationId, true); } { ApplicationId baliId = new ApplicationId.Builder() .tenant(mytenantName) .applicationName("bali") .instanceName("quux") .build(); PrepareParams prepareParamsBali = new PrepareParams.Builder().applicationId(baliId).build(); applicationRepository.deploy(testApp, prepareParamsBali); deleteAndAssertOKResponseMocked(baliId, true); } } @Test public void testDeleteNonExistent() throws Exception { deleteAndAssertResponse(myTenantApplicationId, Zone.defaultZone(), Response.Status.NOT_FOUND, HttpErrorResponse.errorCodes.NOT_FOUND, "Unable to delete mytenant.default.default: Not found"); } @Test public void testGet() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) 
.vespaVersion(vespaVersion) .build(); long sessionId = applicationRepository.deploy(testApp, prepareParams).sessionId(); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, true, vespaVersion); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, false, vespaVersion); } @Test public void testGetQuota() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) .vespaVersion(vespaVersion) .build(); applicationRepository.deploy(testApp, prepareParams).sessionId(); var url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/quota"; var response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); var renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"rate\":0.0}", renderedString); } @Test public void testReindex() throws Exception { ApplicationCuratorDatabase database = applicationRepository.getTenant(applicationId).getApplicationRepo().database(); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationReindexing expected = ApplicationReindexing.ready(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, ""); expected = expected.withReady(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?cluster="); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?type=moo"); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo"); expected = expected.withReady("foo", 
clock.instant()) .withReady("boo", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo&type=bar,baz"); expected = expected.withReady("foo", "bar", clock.instant()) .withReady("foo", "baz", clock.instant()) .withReady("boo", "bar", clock.instant()) .withReady("boo", "baz", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, DELETE); expected = expected.enabled(false); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, POST); expected = expected.enabled(true); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); database.writeReindexingStatus(applicationId, expected.withPending("boo", "bar", 123L)); long now = clock.instant().toEpochMilli(); reindexing(applicationId, GET, "{" + " \"enabled\": true," + " \"status\": {" + " \"readyMillis\": " + (now - 2000) + " }," + " \"clusters\": [" + " {" + " \"name\": \"boo\"," + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": [" + " {" + " \"type\": \"bar\"," + " \"requiredGeneration\": 123" + " }" + " ]," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }," + " {" + " \"name\": \"foo\", " + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": []," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }" + " ]" + "}"); } @Test public void testRestart() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertFalse(provisioner.restarted()); restart(applicationId, Zone.defaultZone()); assertTrue(provisioner.restarted()); 
assertEquals(applicationId, provisioner.lastApplicationId()); } @Test public void testSuspended() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertSuspended(false, applicationId, Zone.defaultZone()); orchestrator.suspend(applicationId); assertSuspended(true, applicationId, Zone.defaultZone()); } @Test public void testConverge() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); converge(applicationId, Zone.defaultZone()); } @Test public void testClusterControllerStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); String host = "foo.yahoo.com"; String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/clustercontroller/" + host + "/status/v1/clusterName1"; HttpProxy mockHttpProxy = mock(HttpProxy.class); ApplicationRepository applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withHostProvisionerProvider(HostProvisionerProvider.empty()) .withOrchestrator(orchestrator) .withTesterClient(testerClient) .withHttpProxy(mockHttpProxy) .build(); ApplicationHandler mockHandler = createApplicationHandler(applicationRepository); when(mockHttpProxy.get(any(), eq(host), eq(CLUSTERCONTROLLER_CONTAINER.serviceName),eq("clustercontroller-status/v1/clusterName1"))) .thenReturn(new StaticResponse(200, "text/html", "<html>...</html>")); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertHttpStatusCodeAndMessage(response, 200, "text/html", "<html>...</html>"); } @Test public void testPutIsIllegal() throws IOException { assertNotAllowed(Method.PUT); } @Test public void testFileDistributionStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); Zone zone = Zone.defaultZone(); HttpResponse response = fileDistributionStatus(applicationId, zone); assertEquals(200, response.getStatus()); 
assertEquals("{\"hosts\":[{\"hostname\":\"mytesthost\",\"status\":\"UNKNOWN\",\"message\":\"error: Connection error(104)\",\"fileReferences\":[]}],\"status\":\"UNKNOWN\"}", getRenderedString(response)); ApplicationId unknown = new ApplicationId.Builder().applicationName("unknown").tenant("default").build(); HttpResponse responseForUnknown = fileDistributionStatus(unknown, zone); assertEquals(404, responseForUnknown.getStatus()); assertEquals("{\"error-code\":\"NOT_FOUND\",\"message\":\"Unknown application id 'default.unknown'\"}", getRenderedString(responseForUnknown)); } @Test public void testGetLogs() throws IOException { applicationRepository.deploy(new File("src/test/apps/app-logserver-with-container"), prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/logs?from=100&to=200"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log line", getRenderedString(response)); } @Test public void testTesterStatus() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/status"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("OK", getRenderedString(response)); } @Test public void testTesterGetLog() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/log?after=1234"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log", getRenderedString(response)); } @Test public void testTesterStartTests() { 
applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/run/staging-test"; ApplicationHandler mockHandler = createApplicationHandler(); InputStream requestData = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); HttpRequest testRequest = createTestRequest(url, POST, requestData); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testTesterReady() { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/ready"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testGetTestReport() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/report"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); assertEquals("report", getRenderedString(response)); } private void assertNotAllowed(Method method) throws IOException { String url = "http: deleteAndAssertResponse(url, Response.Status.METHOD_NOT_ALLOWED, HttpErrorResponse.errorCodes.METHOD_NOT_ALLOWED, "{\"error-code\":\"METHOD_NOT_ALLOWED\",\"message\":\"Method '" + method + "' is not supported\"}", method); } private void deleteAndAssertOKResponseMocked(ApplicationId applicationId, boolean fullAppIdInUrl) throws IOException { Tenant tenant = applicationRepository.getTenant(applicationId); long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, 
Zone.defaultZone(), Response.Status.OK, null, fullAppIdInUrl); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertOKResponse(Tenant tenant, ApplicationId applicationId) throws IOException { long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, Zone.defaultZone(), Response.Status.OK, null, true); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, boolean fullAppIdInUrl) throws IOException { String expectedResponse = "{\"message\":\"Application '" + applicationId + "' deleted\"}"; deleteAndAssertResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse) throws IOException { deleteAndAssertResponse(toUrlPath(applicationId, zone, true), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(String url, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse, Method method) throws IOException { ApplicationHandler handler = createApplicationHandler(); HttpResponse response = handler.handle(createTestRequest(url, method)); if (expectedStatus == 200) { assertHttpStatusCodeAndMessage(response, 200, expectedResponse); } else { HandlerTest.assertHttpStatusCodeErrorCodeAndMessage(response, expectedStatus, errorCode, expectedResponse); } } private void assertApplicationResponse(ApplicationId applicationId, Zone zone, long expectedGeneration, boolean fullAppIdInUrl, Version expectedVersion) throws IOException { assertApplicationResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedGeneration, 
expectedVersion); } private void assertSuspended(boolean expectedValue, ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/suspended"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, "{\"suspended\":" + expectedValue + "}"); } private String toUrlPath(ApplicationId application, Zone zone, boolean fullAppIdInUrl) { String url = "http: if (fullAppIdInUrl) url = url + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + application.instance().value(); return url; } private void assertApplicationResponse(String url, long expectedGeneration, Version expectedVersion) throws IOException { HttpResponse response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); String renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"generation\":" + expectedGeneration + ",\"applicationPackageFileReference\":\"./\"" + ",\"modelVersions\":[\"" + expectedVersion.toFullString() + "\"]}", renderedString); } private void assertApplicationExists(ApplicationId applicationId, Zone zone) throws IOException { String tenantName = applicationId.tenant().value(); String expected = "[\"http: tenantName + "/application/" + applicationId.application().value() + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + applicationId.instance().value() + "\"]"; ListApplicationsHandler listApplicationsHandler = new ListApplicationsHandler(ListApplicationsHandler.testOnlyContext(), tenantRepository, Zone.defaultZone()); ListApplicationsHandlerTest.assertResponse(listApplicationsHandler, "http: Response.Status.OK, expected, GET); } private void reindexing(ApplicationId application, Method method) throws IOException { reindexing(application, method, null); } private void 
reindex(ApplicationId application, String query) throws IOException { String reindexUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindex" + query; assertHttpStatusCodeAndMessage(createApplicationHandler().handle(createTestRequest(reindexUrl, POST)), 200, ""); } private void restart(ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/restart"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, POST)); assertHttpStatusCodeAndMessage(response, 200, ""); } private void converge(ApplicationId application, Zone zone) throws IOException { String convergeUrl = toUrlPath(application, zone, true) + "/serviceconverge"; HttpResponse response = createApplicationHandler().handle(createTestRequest(convergeUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, ""); } private HttpResponse fileDistributionStatus(ApplicationId application, Zone zone) { String restartUrl = toUrlPath(application, zone, true) + "/filedistributionstatus"; return createApplicationHandler().handle(createTestRequest(restartUrl, GET)); } private static class MockStateApiFactory implements ConfigConvergenceChecker.StateApiFactory { boolean createdApi = false; @Override public ConfigConvergenceChecker.StateApi createStateApi(Client client, URI serviceUri) { createdApi = true; return () -> { try { return new ObjectMapper().readTree("{\"config\":{\"generation\":1}}"); } catch (IOException e) { throw new RuntimeException(e); } }; } } private ApplicationHandler createApplicationHandler() { return createApplicationHandler(applicationRepository); } private ApplicationHandler createApplicationHandler(ApplicationRepository applicationRepository) { return new ApplicationHandler(ApplicationHandler.testOnlyContext(), Zone.defaultZone(), applicationRepository); } private PrepareParams prepareParams(ApplicationId applicationId) { return new PrepareParams.Builder().applicationId(applicationId).build(); } }
And we _can_ send the timestamps for all documents when disabled, as the thing won't run anyway. I think we should probably settle on using either signal for disabling it.
public void getConfig(ReindexingConfig.Builder builder) { builder.clusterName(contentClusterName); if (reindexing == null || !reindexing.enabled()) { builder.enabled(false); return; } builder.enabled(true); for (NewDocumentType type : documentTypes) { String typeName = type.getFullName().getName(); reindexing.status(contentClusterName, typeName).ifPresent(status -> builder.status( typeName, new ReindexingConfig.Status.Builder() .readyAtMillis(status.ready().toEpochMilli()))); } }
return;
public void getConfig(ReindexingConfig.Builder builder) { builder.clusterName(contentClusterName); builder.enabled(reindexing.enabled()); for (NewDocumentType type : documentTypes) { String typeName = type.getFullName().getName(); reindexing.status(contentClusterName, typeName).ifPresent(status -> builder.status( typeName, new ReindexingConfig.Status.Builder() .readyAtMillis(status.ready().toEpochMilli()))); } }
class ReindexingController extends SimpleComponent implements ReindexingConfig.Producer { static final String REINDEXING_CONTROLLER_BUNDLE = "clustercontroller-reindexer"; private final Reindexing reindexing; private final String contentClusterName; private final Collection<NewDocumentType> documentTypes; ReindexingController(ReindexingContext context) { super(new ComponentModel( BundleInstantiationSpecification.getFromStrings( "reindexing-maintainer", "ai.vespa.reindexing.ReindexingMaintainer", REINDEXING_CONTROLLER_BUNDLE))); this.reindexing = context.reindexing().orElse(null); this.contentClusterName = context.contentClusterName(); this.documentTypes = context.documentTypes(); } @Override }
class ReindexingController extends SimpleComponent implements ReindexingConfig.Producer { static final String REINDEXING_CONTROLLER_BUNDLE = "clustercontroller-reindexer"; private final Reindexing reindexing; private final String contentClusterName; private final Collection<NewDocumentType> documentTypes; ReindexingController(ReindexingContext context) { super(new ComponentModel( BundleInstantiationSpecification.getFromStrings( "reindexing-maintainer", "ai.vespa.reindexing.ReindexingMaintainer", REINDEXING_CONTROLLER_BUNDLE))); this.reindexing = context.reindexing(); this.contentClusterName = context.contentClusterName(); this.documentTypes = context.documentTypes(); } @Override }
Debug code accidentally included?
private void reindexing(ApplicationId application, Method method, String expectedBody) throws IOException { String reindexingUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindexing"; HttpResponse response = createApplicationHandler().handle(createTestRequest(reindexingUrl, method)); assertEquals(200, response.getStatus()); if (expectedBody != null) { ByteArrayOutputStream out = new ByteArrayOutputStream(); response.render(out); System.err.println(out); assertJsonEquals(out.toString(), expectedBody); } }
System.err.println(out);
private void reindexing(ApplicationId application, Method method, String expectedBody) throws IOException { String reindexingUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindexing"; HttpResponse response = createApplicationHandler().handle(createTestRequest(reindexingUrl, method)); assertEquals(200, response.getStatus()); if (expectedBody != null) { ByteArrayOutputStream out = new ByteArrayOutputStream(); response.render(out); assertJsonEquals(out.toString(), expectedBody); } }
// NOTE(review): JUnit 4 test class for the config server's ApplicationHandler REST API
// (delete/get/reindex/restart/suspend/converge/tester endpoints). Extraction has collapsed
// the original line structure and truncated several string literals (the bare "http: URLs
// below are unterminated), so this block does not compile as-is; it is preserved
// byte-for-byte — only this review comment was added. Also note: in setup(),
// .provisioner(provisioner) is called on the TestComponentRegistry builder before the
// provisioner field is assigned, so it passes null — presumably intentional for the mock
// registry, but confirm against the original source before editing.
class ApplicationHandlerTest { private static final File testApp = new File("src/test/apps/app"); private final static TenantName mytenantName = TenantName.from("mytenant"); private final static ApplicationId myTenantApplicationId = ApplicationId.from(mytenantName, ApplicationName.defaultName(), InstanceName.defaultName()); private final static ApplicationId applicationId = ApplicationId.from(TenantName.defaultName(), ApplicationName.defaultName(), InstanceName.defaultName()); private final static MockTesterClient testerClient = new MockTesterClient(); private static final MockLogRetriever logRetriever = new MockLogRetriever(); private static final Version vespaVersion = Version.fromString("7.8.9"); private TenantRepository tenantRepository; private ApplicationRepository applicationRepository; private MockProvisioner provisioner; private OrchestratorMock orchestrator; private ManualClock clock; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() throws IOException { clock = new ManualClock(); List<ModelFactory> modelFactories = List.of(DeployTester.createModelFactory(vespaVersion)); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder().getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder().getAbsolutePath()) .fileReferencesDir(temporaryFolder.newFolder().getAbsolutePath()) .build(); TestComponentRegistry componentRegistry = new TestComponentRegistry.Builder() .provisioner(provisioner) .modelFactoryRegistry(new ModelFactoryRegistry(modelFactories)) .configServerConfig(configserverConfig) .clock(clock) .build(); tenantRepository = new TenantRepository(componentRegistry); tenantRepository.addTenant(mytenantName); provisioner = new MockProvisioner(); orchestrator = new OrchestratorMock(); applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withProvisioner(provisioner) 
.withOrchestrator(orchestrator) .withClock(componentRegistry.getClock()) .withTesterClient(testerClient) .withLogRetriever(logRetriever) .withConfigserverConfig(configserverConfig) .build(); } @After public void shutdown() { tenantRepository.close(); } @Test public void testDelete() throws Exception { TenantName foobar = TenantName.from("foobar"); tenantRepository.addTenant(foobar); { applicationRepository.deploy(testApp, prepareParams(applicationId)); Tenant mytenant = applicationRepository.getTenant(applicationId); deleteAndAssertOKResponse(mytenant, applicationId); } { applicationRepository.deploy(testApp, prepareParams(applicationId)); deleteAndAssertOKResponseMocked(applicationId, true); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationId fooId = new ApplicationId.Builder() .tenant(foobar) .applicationName("foo") .instanceName("quux") .build(); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); applicationRepository.deploy(testApp, prepareParams2); assertApplicationExists(fooId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(fooId, true); assertApplicationExists(applicationId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(applicationId, true); } { ApplicationId baliId = new ApplicationId.Builder() .tenant(mytenantName) .applicationName("bali") .instanceName("quux") .build(); PrepareParams prepareParamsBali = new PrepareParams.Builder().applicationId(baliId).build(); applicationRepository.deploy(testApp, prepareParamsBali); deleteAndAssertOKResponseMocked(baliId, true); } } @Test public void testDeleteNonExistent() throws Exception { deleteAndAssertResponse(myTenantApplicationId, Zone.defaultZone(), Response.Status.NOT_FOUND, HttpErrorResponse.errorCodes.NOT_FOUND, "Unable to delete mytenant.default.default: Not found"); } @Test public void testGet() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) 
.vespaVersion(vespaVersion) .build(); long sessionId = applicationRepository.deploy(testApp, prepareParams).sessionId(); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, true, vespaVersion); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, false, vespaVersion); } @Test public void testGetQuota() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) .vespaVersion(vespaVersion) .build(); applicationRepository.deploy(testApp, prepareParams).sessionId(); var url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/quota"; var response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); var renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"rate\":0.0}", renderedString); } @Test public void testReindex() throws Exception { ApplicationCuratorDatabase database = applicationRepository.getTenant(applicationId).getApplicationRepo().database(); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationReindexing expected = ApplicationReindexing.ready(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, ""); expected = expected.withReady(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?cluster="); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?type=moo"); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo"); expected = expected.withReady("foo", 
clock.instant()) .withReady("boo", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo&type=bar,baz"); expected = expected.withReady("foo", "bar", clock.instant()) .withReady("foo", "baz", clock.instant()) .withReady("boo", "bar", clock.instant()) .withReady("boo", "baz", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, DELETE); expected = expected.enabled(false); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, POST); expected = expected.enabled(true); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); long now = clock.instant().toEpochMilli(); reindexing(applicationId, GET, "{" + " \"enabled\": true," + " \"status\": {" + " \"readyMillis\": " + (now - 2000) + " }," + " \"clusters\": [" + " {" + " \"name\": \"boo\"," + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": []," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }," + " {" + " \"name\": \"foo\", " + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": []," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }" + " ]" + "}"); } @Test public void testRestart() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertFalse(provisioner.restarted()); restart(applicationId, Zone.defaultZone()); assertTrue(provisioner.restarted()); assertEquals(applicationId, provisioner.lastApplicationId()); } @Test public void testSuspended() throws Exception { applicationRepository.deploy(testApp, 
prepareParams(applicationId)); assertSuspended(false, applicationId, Zone.defaultZone()); orchestrator.suspend(applicationId); assertSuspended(true, applicationId, Zone.defaultZone()); } @Test public void testConverge() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); converge(applicationId, Zone.defaultZone()); } @Test public void testClusterControllerStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); String host = "foo.yahoo.com"; String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/clustercontroller/" + host + "/status/v1/clusterName1"; HttpProxy mockHttpProxy = mock(HttpProxy.class); ApplicationRepository applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withHostProvisionerProvider(HostProvisionerProvider.empty()) .withOrchestrator(orchestrator) .withTesterClient(testerClient) .withHttpProxy(mockHttpProxy) .build(); ApplicationHandler mockHandler = createApplicationHandler(applicationRepository); when(mockHttpProxy.get(any(), eq(host), eq(CLUSTERCONTROLLER_CONTAINER.serviceName),eq("clustercontroller-status/v1/clusterName1"))) .thenReturn(new StaticResponse(200, "text/html", "<html>...</html>")); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertHttpStatusCodeAndMessage(response, 200, "text/html", "<html>...</html>"); } @Test public void testPutIsIllegal() throws IOException { assertNotAllowed(Method.PUT); } @Test public void testFileDistributionStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); Zone zone = Zone.defaultZone(); HttpResponse response = fileDistributionStatus(applicationId, zone); assertEquals(200, response.getStatus()); assertEquals("{\"hosts\":[{\"hostname\":\"mytesthost\",\"status\":\"UNKNOWN\",\"message\":\"error: Connection error(104)\",\"fileReferences\":[]}],\"status\":\"UNKNOWN\"}", getRenderedString(response)); 
ApplicationId unknown = new ApplicationId.Builder().applicationName("unknown").tenant("default").build(); HttpResponse responseForUnknown = fileDistributionStatus(unknown, zone); assertEquals(404, responseForUnknown.getStatus()); assertEquals("{\"error-code\":\"NOT_FOUND\",\"message\":\"Unknown application id 'default.unknown'\"}", getRenderedString(responseForUnknown)); } @Test public void testGetLogs() throws IOException { applicationRepository.deploy(new File("src/test/apps/app-logserver-with-container"), prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/logs?from=100&to=200"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log line", getRenderedString(response)); } @Test public void testTesterStatus() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/status"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("OK", getRenderedString(response)); } @Test public void testTesterGetLog() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/log?after=1234"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log", getRenderedString(response)); } @Test public void testTesterStartTests() { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/run/staging-test"; ApplicationHandler mockHandler = 
createApplicationHandler(); InputStream requestData = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); HttpRequest testRequest = createTestRequest(url, POST, requestData); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testTesterReady() { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/ready"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testGetTestReport() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/report"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); assertEquals("report", getRenderedString(response)); } private void assertNotAllowed(Method method) throws IOException { String url = "http: deleteAndAssertResponse(url, Response.Status.METHOD_NOT_ALLOWED, HttpErrorResponse.errorCodes.METHOD_NOT_ALLOWED, "{\"error-code\":\"METHOD_NOT_ALLOWED\",\"message\":\"Method '" + method + "' is not supported\"}", method); } private void deleteAndAssertOKResponseMocked(ApplicationId applicationId, boolean fullAppIdInUrl) throws IOException { Tenant tenant = applicationRepository.getTenant(applicationId); long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, Zone.defaultZone(), Response.Status.OK, null, fullAppIdInUrl); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertOKResponse(Tenant tenant, ApplicationId 
applicationId) throws IOException { long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, Zone.defaultZone(), Response.Status.OK, null, true); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, boolean fullAppIdInUrl) throws IOException { String expectedResponse = "{\"message\":\"Application '" + applicationId + "' deleted\"}"; deleteAndAssertResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse) throws IOException { deleteAndAssertResponse(toUrlPath(applicationId, zone, true), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(String url, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse, Method method) throws IOException { ApplicationHandler handler = createApplicationHandler(); HttpResponse response = handler.handle(createTestRequest(url, method)); if (expectedStatus == 200) { assertHttpStatusCodeAndMessage(response, 200, expectedResponse); } else { HandlerTest.assertHttpStatusCodeErrorCodeAndMessage(response, expectedStatus, errorCode, expectedResponse); } } private void assertApplicationResponse(ApplicationId applicationId, Zone zone, long expectedGeneration, boolean fullAppIdInUrl, Version expectedVersion) throws IOException { assertApplicationResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedGeneration, expectedVersion); } private void assertSuspended(boolean expectedValue, ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/suspended"; HttpResponse 
response = createApplicationHandler().handle(createTestRequest(restartUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, "{\"suspended\":" + expectedValue + "}"); } private String toUrlPath(ApplicationId application, Zone zone, boolean fullAppIdInUrl) { String url = "http: if (fullAppIdInUrl) url = url + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + application.instance().value(); return url; } private void assertApplicationResponse(String url, long expectedGeneration, Version expectedVersion) throws IOException { HttpResponse response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); String renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"generation\":" + expectedGeneration + ",\"applicationPackageFileReference\":\"./\"" + ",\"modelVersions\":[\"" + expectedVersion.toFullString() + "\"]}", renderedString); } private void assertApplicationExists(ApplicationId applicationId, Zone zone) throws IOException { String tenantName = applicationId.tenant().value(); String expected = "[\"http: tenantName + "/application/" + applicationId.application().value() + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + applicationId.instance().value() + "\"]"; ListApplicationsHandler listApplicationsHandler = new ListApplicationsHandler(ListApplicationsHandler.testOnlyContext(), tenantRepository, Zone.defaultZone()); ListApplicationsHandlerTest.assertResponse(listApplicationsHandler, "http: Response.Status.OK, expected, GET); } private void reindexing(ApplicationId application, Method method) throws IOException { reindexing(application, method, null); } private void reindex(ApplicationId application, String query) throws IOException { String reindexUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindex" + query; 
assertHttpStatusCodeAndMessage(createApplicationHandler().handle(createTestRequest(reindexUrl, POST)), 200, ""); } private void restart(ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/restart"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, POST)); assertHttpStatusCodeAndMessage(response, 200, ""); } private void converge(ApplicationId application, Zone zone) throws IOException { String convergeUrl = toUrlPath(application, zone, true) + "/serviceconverge"; HttpResponse response = createApplicationHandler().handle(createTestRequest(convergeUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, ""); } private HttpResponse fileDistributionStatus(ApplicationId application, Zone zone) { String restartUrl = toUrlPath(application, zone, true) + "/filedistributionstatus"; return createApplicationHandler().handle(createTestRequest(restartUrl, GET)); } private static class MockStateApiFactory implements ConfigConvergenceChecker.StateApiFactory { boolean createdApi = false; @Override public ConfigConvergenceChecker.StateApi createStateApi(Client client, URI serviceUri) { createdApi = true; return () -> { try { return new ObjectMapper().readTree("{\"config\":{\"generation\":1}}"); } catch (IOException e) { throw new RuntimeException(e); } }; } } private ApplicationHandler createApplicationHandler() { return createApplicationHandler(applicationRepository); } private ApplicationHandler createApplicationHandler(ApplicationRepository applicationRepository) { return new ApplicationHandler(ApplicationHandler.testOnlyContext(), Zone.defaultZone(), applicationRepository); } private PrepareParams prepareParams(ApplicationId applicationId) { return new PrepareParams.Builder().applicationId(applicationId).build(); } }
class ApplicationHandlerTest { private static final File testApp = new File("src/test/apps/app"); private final static TenantName mytenantName = TenantName.from("mytenant"); private final static ApplicationId myTenantApplicationId = ApplicationId.from(mytenantName, ApplicationName.defaultName(), InstanceName.defaultName()); private final static ApplicationId applicationId = ApplicationId.from(TenantName.defaultName(), ApplicationName.defaultName(), InstanceName.defaultName()); private final static MockTesterClient testerClient = new MockTesterClient(); private static final MockLogRetriever logRetriever = new MockLogRetriever(); private static final Version vespaVersion = Version.fromString("7.8.9"); private TenantRepository tenantRepository; private ApplicationRepository applicationRepository; private MockProvisioner provisioner; private OrchestratorMock orchestrator; private ManualClock clock; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() throws IOException { clock = new ManualClock(); List<ModelFactory> modelFactories = List.of(DeployTester.createModelFactory(vespaVersion)); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder().getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder().getAbsolutePath()) .fileReferencesDir(temporaryFolder.newFolder().getAbsolutePath()) .build(); TestComponentRegistry componentRegistry = new TestComponentRegistry.Builder() .provisioner(provisioner) .modelFactoryRegistry(new ModelFactoryRegistry(modelFactories)) .configServerConfig(configserverConfig) .clock(clock) .build(); tenantRepository = new TenantRepository(componentRegistry); tenantRepository.addTenant(mytenantName); provisioner = new MockProvisioner(); orchestrator = new OrchestratorMock(); applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withProvisioner(provisioner) 
.withOrchestrator(orchestrator) .withClock(componentRegistry.getClock()) .withTesterClient(testerClient) .withLogRetriever(logRetriever) .withConfigserverConfig(configserverConfig) .build(); } @After public void shutdown() { tenantRepository.close(); } @Test public void testDelete() throws Exception { TenantName foobar = TenantName.from("foobar"); tenantRepository.addTenant(foobar); { applicationRepository.deploy(testApp, prepareParams(applicationId)); Tenant mytenant = applicationRepository.getTenant(applicationId); deleteAndAssertOKResponse(mytenant, applicationId); } { applicationRepository.deploy(testApp, prepareParams(applicationId)); deleteAndAssertOKResponseMocked(applicationId, true); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationId fooId = new ApplicationId.Builder() .tenant(foobar) .applicationName("foo") .instanceName("quux") .build(); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); applicationRepository.deploy(testApp, prepareParams2); assertApplicationExists(fooId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(fooId, true); assertApplicationExists(applicationId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(applicationId, true); } { ApplicationId baliId = new ApplicationId.Builder() .tenant(mytenantName) .applicationName("bali") .instanceName("quux") .build(); PrepareParams prepareParamsBali = new PrepareParams.Builder().applicationId(baliId).build(); applicationRepository.deploy(testApp, prepareParamsBali); deleteAndAssertOKResponseMocked(baliId, true); } } @Test public void testDeleteNonExistent() throws Exception { deleteAndAssertResponse(myTenantApplicationId, Zone.defaultZone(), Response.Status.NOT_FOUND, HttpErrorResponse.errorCodes.NOT_FOUND, "Unable to delete mytenant.default.default: Not found"); } @Test public void testGet() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) 
.vespaVersion(vespaVersion) .build(); long sessionId = applicationRepository.deploy(testApp, prepareParams).sessionId(); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, true, vespaVersion); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, false, vespaVersion); } @Test public void testGetQuota() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) .vespaVersion(vespaVersion) .build(); applicationRepository.deploy(testApp, prepareParams).sessionId(); var url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/quota"; var response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); var renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"rate\":0.0}", renderedString); } @Test public void testReindex() throws Exception { ApplicationCuratorDatabase database = applicationRepository.getTenant(applicationId).getApplicationRepo().database(); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationReindexing expected = ApplicationReindexing.ready(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, ""); expected = expected.withReady(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?cluster="); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?type=moo"); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo"); expected = expected.withReady("foo", 
clock.instant()) .withReady("boo", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo&type=bar,baz"); expected = expected.withReady("foo", "bar", clock.instant()) .withReady("foo", "baz", clock.instant()) .withReady("boo", "bar", clock.instant()) .withReady("boo", "baz", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, DELETE); expected = expected.enabled(false); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, POST); expected = expected.enabled(true); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); database.writeReindexingStatus(applicationId, expected.withPending("boo", "bar", 123L)); long now = clock.instant().toEpochMilli(); reindexing(applicationId, GET, "{" + " \"enabled\": true," + " \"status\": {" + " \"readyMillis\": " + (now - 2000) + " }," + " \"clusters\": [" + " {" + " \"name\": \"boo\"," + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": [" + " {" + " \"type\": \"bar\"," + " \"requiredGeneration\": 123" + " }" + " ]," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }," + " {" + " \"name\": \"foo\", " + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": []," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }" + " ]" + "}"); } @Test public void testRestart() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertFalse(provisioner.restarted()); restart(applicationId, Zone.defaultZone()); assertTrue(provisioner.restarted()); 
assertEquals(applicationId, provisioner.lastApplicationId()); } @Test public void testSuspended() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertSuspended(false, applicationId, Zone.defaultZone()); orchestrator.suspend(applicationId); assertSuspended(true, applicationId, Zone.defaultZone()); } @Test public void testConverge() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); converge(applicationId, Zone.defaultZone()); } @Test public void testClusterControllerStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); String host = "foo.yahoo.com"; String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/clustercontroller/" + host + "/status/v1/clusterName1"; HttpProxy mockHttpProxy = mock(HttpProxy.class); ApplicationRepository applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withHostProvisionerProvider(HostProvisionerProvider.empty()) .withOrchestrator(orchestrator) .withTesterClient(testerClient) .withHttpProxy(mockHttpProxy) .build(); ApplicationHandler mockHandler = createApplicationHandler(applicationRepository); when(mockHttpProxy.get(any(), eq(host), eq(CLUSTERCONTROLLER_CONTAINER.serviceName),eq("clustercontroller-status/v1/clusterName1"))) .thenReturn(new StaticResponse(200, "text/html", "<html>...</html>")); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertHttpStatusCodeAndMessage(response, 200, "text/html", "<html>...</html>"); } @Test public void testPutIsIllegal() throws IOException { assertNotAllowed(Method.PUT); } @Test public void testFileDistributionStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); Zone zone = Zone.defaultZone(); HttpResponse response = fileDistributionStatus(applicationId, zone); assertEquals(200, response.getStatus()); 
assertEquals("{\"hosts\":[{\"hostname\":\"mytesthost\",\"status\":\"UNKNOWN\",\"message\":\"error: Connection error(104)\",\"fileReferences\":[]}],\"status\":\"UNKNOWN\"}", getRenderedString(response)); ApplicationId unknown = new ApplicationId.Builder().applicationName("unknown").tenant("default").build(); HttpResponse responseForUnknown = fileDistributionStatus(unknown, zone); assertEquals(404, responseForUnknown.getStatus()); assertEquals("{\"error-code\":\"NOT_FOUND\",\"message\":\"Unknown application id 'default.unknown'\"}", getRenderedString(responseForUnknown)); } @Test public void testGetLogs() throws IOException { applicationRepository.deploy(new File("src/test/apps/app-logserver-with-container"), prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/logs?from=100&to=200"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log line", getRenderedString(response)); } @Test public void testTesterStatus() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/status"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("OK", getRenderedString(response)); } @Test public void testTesterGetLog() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/log?after=1234"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log", getRenderedString(response)); } @Test public void testTesterStartTests() { 
applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/run/staging-test"; ApplicationHandler mockHandler = createApplicationHandler(); InputStream requestData = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); HttpRequest testRequest = createTestRequest(url, POST, requestData); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testTesterReady() { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/ready"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testGetTestReport() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/report"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); assertEquals("report", getRenderedString(response)); } private void assertNotAllowed(Method method) throws IOException { String url = "http: deleteAndAssertResponse(url, Response.Status.METHOD_NOT_ALLOWED, HttpErrorResponse.errorCodes.METHOD_NOT_ALLOWED, "{\"error-code\":\"METHOD_NOT_ALLOWED\",\"message\":\"Method '" + method + "' is not supported\"}", method); } private void deleteAndAssertOKResponseMocked(ApplicationId applicationId, boolean fullAppIdInUrl) throws IOException { Tenant tenant = applicationRepository.getTenant(applicationId); long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, 
Zone.defaultZone(), Response.Status.OK, null, fullAppIdInUrl); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertOKResponse(Tenant tenant, ApplicationId applicationId) throws IOException { long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, Zone.defaultZone(), Response.Status.OK, null, true); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, boolean fullAppIdInUrl) throws IOException { String expectedResponse = "{\"message\":\"Application '" + applicationId + "' deleted\"}"; deleteAndAssertResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse) throws IOException { deleteAndAssertResponse(toUrlPath(applicationId, zone, true), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(String url, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse, Method method) throws IOException { ApplicationHandler handler = createApplicationHandler(); HttpResponse response = handler.handle(createTestRequest(url, method)); if (expectedStatus == 200) { assertHttpStatusCodeAndMessage(response, 200, expectedResponse); } else { HandlerTest.assertHttpStatusCodeErrorCodeAndMessage(response, expectedStatus, errorCode, expectedResponse); } } private void assertApplicationResponse(ApplicationId applicationId, Zone zone, long expectedGeneration, boolean fullAppIdInUrl, Version expectedVersion) throws IOException { assertApplicationResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedGeneration, 
expectedVersion); } private void assertSuspended(boolean expectedValue, ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/suspended"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, "{\"suspended\":" + expectedValue + "}"); } private String toUrlPath(ApplicationId application, Zone zone, boolean fullAppIdInUrl) { String url = "http: if (fullAppIdInUrl) url = url + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + application.instance().value(); return url; } private void assertApplicationResponse(String url, long expectedGeneration, Version expectedVersion) throws IOException { HttpResponse response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); String renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"generation\":" + expectedGeneration + ",\"applicationPackageFileReference\":\"./\"" + ",\"modelVersions\":[\"" + expectedVersion.toFullString() + "\"]}", renderedString); } private void assertApplicationExists(ApplicationId applicationId, Zone zone) throws IOException { String tenantName = applicationId.tenant().value(); String expected = "[\"http: tenantName + "/application/" + applicationId.application().value() + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + applicationId.instance().value() + "\"]"; ListApplicationsHandler listApplicationsHandler = new ListApplicationsHandler(ListApplicationsHandler.testOnlyContext(), tenantRepository, Zone.defaultZone()); ListApplicationsHandlerTest.assertResponse(listApplicationsHandler, "http: Response.Status.OK, expected, GET); } private void reindexing(ApplicationId application, Method method) throws IOException { reindexing(application, method, null); } private void 
reindex(ApplicationId application, String query) throws IOException { String reindexUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindex" + query; assertHttpStatusCodeAndMessage(createApplicationHandler().handle(createTestRequest(reindexUrl, POST)), 200, ""); } private void restart(ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/restart"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, POST)); assertHttpStatusCodeAndMessage(response, 200, ""); } private void converge(ApplicationId application, Zone zone) throws IOException { String convergeUrl = toUrlPath(application, zone, true) + "/serviceconverge"; HttpResponse response = createApplicationHandler().handle(createTestRequest(convergeUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, ""); } private HttpResponse fileDistributionStatus(ApplicationId application, Zone zone) { String restartUrl = toUrlPath(application, zone, true) + "/filedistributionstatus"; return createApplicationHandler().handle(createTestRequest(restartUrl, GET)); } private static class MockStateApiFactory implements ConfigConvergenceChecker.StateApiFactory { boolean createdApi = false; @Override public ConfigConvergenceChecker.StateApi createStateApi(Client client, URI serviceUri) { createdApi = true; return () -> { try { return new ObjectMapper().readTree("{\"config\":{\"generation\":1}}"); } catch (IOException e) { throw new RuntimeException(e); } }; } } private ApplicationHandler createApplicationHandler() { return createApplicationHandler(applicationRepository); } private ApplicationHandler createApplicationHandler(ApplicationRepository applicationRepository) { return new ApplicationHandler(ApplicationHandler.testOnlyContext(), Zone.defaultZone(), applicationRepository); } private PrepareParams prepareParams(ApplicationId applicationId) { return new PrepareParams.Builder().applicationId(applicationId).build(); } }
```suggestion ```
private void reindexing(ApplicationId application, Method method, String expectedBody) throws IOException { String reindexingUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindexing"; HttpResponse response = createApplicationHandler().handle(createTestRequest(reindexingUrl, method)); assertEquals(200, response.getStatus()); if (expectedBody != null) { ByteArrayOutputStream out = new ByteArrayOutputStream(); response.render(out); System.err.println(out); assertJsonEquals(out.toString(), expectedBody); } }
System.err.println(out);
private void reindexing(ApplicationId application, Method method, String expectedBody) throws IOException { String reindexingUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindexing"; HttpResponse response = createApplicationHandler().handle(createTestRequest(reindexingUrl, method)); assertEquals(200, response.getStatus()); if (expectedBody != null) { ByteArrayOutputStream out = new ByteArrayOutputStream(); response.render(out); assertJsonEquals(out.toString(), expectedBody); } }
class ApplicationHandlerTest { private static final File testApp = new File("src/test/apps/app"); private final static TenantName mytenantName = TenantName.from("mytenant"); private final static ApplicationId myTenantApplicationId = ApplicationId.from(mytenantName, ApplicationName.defaultName(), InstanceName.defaultName()); private final static ApplicationId applicationId = ApplicationId.from(TenantName.defaultName(), ApplicationName.defaultName(), InstanceName.defaultName()); private final static MockTesterClient testerClient = new MockTesterClient(); private static final MockLogRetriever logRetriever = new MockLogRetriever(); private static final Version vespaVersion = Version.fromString("7.8.9"); private TenantRepository tenantRepository; private ApplicationRepository applicationRepository; private MockProvisioner provisioner; private OrchestratorMock orchestrator; private ManualClock clock; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() throws IOException { clock = new ManualClock(); List<ModelFactory> modelFactories = List.of(DeployTester.createModelFactory(vespaVersion)); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder().getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder().getAbsolutePath()) .fileReferencesDir(temporaryFolder.newFolder().getAbsolutePath()) .build(); TestComponentRegistry componentRegistry = new TestComponentRegistry.Builder() .provisioner(provisioner) .modelFactoryRegistry(new ModelFactoryRegistry(modelFactories)) .configServerConfig(configserverConfig) .clock(clock) .build(); tenantRepository = new TenantRepository(componentRegistry); tenantRepository.addTenant(mytenantName); provisioner = new MockProvisioner(); orchestrator = new OrchestratorMock(); applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withProvisioner(provisioner) 
.withOrchestrator(orchestrator) .withClock(componentRegistry.getClock()) .withTesterClient(testerClient) .withLogRetriever(logRetriever) .withConfigserverConfig(configserverConfig) .build(); } @After public void shutdown() { tenantRepository.close(); } @Test public void testDelete() throws Exception { TenantName foobar = TenantName.from("foobar"); tenantRepository.addTenant(foobar); { applicationRepository.deploy(testApp, prepareParams(applicationId)); Tenant mytenant = applicationRepository.getTenant(applicationId); deleteAndAssertOKResponse(mytenant, applicationId); } { applicationRepository.deploy(testApp, prepareParams(applicationId)); deleteAndAssertOKResponseMocked(applicationId, true); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationId fooId = new ApplicationId.Builder() .tenant(foobar) .applicationName("foo") .instanceName("quux") .build(); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); applicationRepository.deploy(testApp, prepareParams2); assertApplicationExists(fooId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(fooId, true); assertApplicationExists(applicationId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(applicationId, true); } { ApplicationId baliId = new ApplicationId.Builder() .tenant(mytenantName) .applicationName("bali") .instanceName("quux") .build(); PrepareParams prepareParamsBali = new PrepareParams.Builder().applicationId(baliId).build(); applicationRepository.deploy(testApp, prepareParamsBali); deleteAndAssertOKResponseMocked(baliId, true); } } @Test public void testDeleteNonExistent() throws Exception { deleteAndAssertResponse(myTenantApplicationId, Zone.defaultZone(), Response.Status.NOT_FOUND, HttpErrorResponse.errorCodes.NOT_FOUND, "Unable to delete mytenant.default.default: Not found"); } @Test public void testGet() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) 
.vespaVersion(vespaVersion) .build(); long sessionId = applicationRepository.deploy(testApp, prepareParams).sessionId(); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, true, vespaVersion); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, false, vespaVersion); } @Test public void testGetQuota() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) .vespaVersion(vespaVersion) .build(); applicationRepository.deploy(testApp, prepareParams).sessionId(); var url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/quota"; var response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); var renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"rate\":0.0}", renderedString); } @Test public void testReindex() throws Exception { ApplicationCuratorDatabase database = applicationRepository.getTenant(applicationId).getApplicationRepo().database(); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationReindexing expected = ApplicationReindexing.ready(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, ""); expected = expected.withReady(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?cluster="); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?type=moo"); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo"); expected = expected.withReady("foo", 
clock.instant()) .withReady("boo", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo&type=bar,baz"); expected = expected.withReady("foo", "bar", clock.instant()) .withReady("foo", "baz", clock.instant()) .withReady("boo", "bar", clock.instant()) .withReady("boo", "baz", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, DELETE); expected = expected.enabled(false); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, POST); expected = expected.enabled(true); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); database.writeReindexingStatus(applicationId, expected.withPending("boo", "bar", 123L)); long now = clock.instant().toEpochMilli(); reindexing(applicationId, GET, "{" + " \"enabled\": true," + " \"status\": {" + " \"readyMillis\": " + (now - 2000) + " }," + " \"clusters\": [" + " {" + " \"name\": \"boo\"," + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": [" + " {" + " \"type\": \"bar\"," + " \"requiredGeneration\": 123" + " }" + " ]," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }," + " {" + " \"name\": \"foo\", " + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": []," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }" + " ]" + "}"); } @Test public void testRestart() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertFalse(provisioner.restarted()); restart(applicationId, Zone.defaultZone()); assertTrue(provisioner.restarted()); 
assertEquals(applicationId, provisioner.lastApplicationId()); } @Test public void testSuspended() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertSuspended(false, applicationId, Zone.defaultZone()); orchestrator.suspend(applicationId); assertSuspended(true, applicationId, Zone.defaultZone()); } @Test public void testConverge() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); converge(applicationId, Zone.defaultZone()); } @Test public void testClusterControllerStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); String host = "foo.yahoo.com"; String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/clustercontroller/" + host + "/status/v1/clusterName1"; HttpProxy mockHttpProxy = mock(HttpProxy.class); ApplicationRepository applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withHostProvisionerProvider(HostProvisionerProvider.empty()) .withOrchestrator(orchestrator) .withTesterClient(testerClient) .withHttpProxy(mockHttpProxy) .build(); ApplicationHandler mockHandler = createApplicationHandler(applicationRepository); when(mockHttpProxy.get(any(), eq(host), eq(CLUSTERCONTROLLER_CONTAINER.serviceName),eq("clustercontroller-status/v1/clusterName1"))) .thenReturn(new StaticResponse(200, "text/html", "<html>...</html>")); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertHttpStatusCodeAndMessage(response, 200, "text/html", "<html>...</html>"); } @Test public void testPutIsIllegal() throws IOException { assertNotAllowed(Method.PUT); } @Test public void testFileDistributionStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); Zone zone = Zone.defaultZone(); HttpResponse response = fileDistributionStatus(applicationId, zone); assertEquals(200, response.getStatus()); 
assertEquals("{\"hosts\":[{\"hostname\":\"mytesthost\",\"status\":\"UNKNOWN\",\"message\":\"error: Connection error(104)\",\"fileReferences\":[]}],\"status\":\"UNKNOWN\"}", getRenderedString(response)); ApplicationId unknown = new ApplicationId.Builder().applicationName("unknown").tenant("default").build(); HttpResponse responseForUnknown = fileDistributionStatus(unknown, zone); assertEquals(404, responseForUnknown.getStatus()); assertEquals("{\"error-code\":\"NOT_FOUND\",\"message\":\"Unknown application id 'default.unknown'\"}", getRenderedString(responseForUnknown)); } @Test public void testGetLogs() throws IOException { applicationRepository.deploy(new File("src/test/apps/app-logserver-with-container"), prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/logs?from=100&to=200"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log line", getRenderedString(response)); } @Test public void testTesterStatus() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/status"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("OK", getRenderedString(response)); } @Test public void testTesterGetLog() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/log?after=1234"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log", getRenderedString(response)); } @Test public void testTesterStartTests() { 
applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/run/staging-test"; ApplicationHandler mockHandler = createApplicationHandler(); InputStream requestData = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); HttpRequest testRequest = createTestRequest(url, POST, requestData); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testTesterReady() { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/ready"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testGetTestReport() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/report"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); assertEquals("report", getRenderedString(response)); } private void assertNotAllowed(Method method) throws IOException { String url = "http: deleteAndAssertResponse(url, Response.Status.METHOD_NOT_ALLOWED, HttpErrorResponse.errorCodes.METHOD_NOT_ALLOWED, "{\"error-code\":\"METHOD_NOT_ALLOWED\",\"message\":\"Method '" + method + "' is not supported\"}", method); } private void deleteAndAssertOKResponseMocked(ApplicationId applicationId, boolean fullAppIdInUrl) throws IOException { Tenant tenant = applicationRepository.getTenant(applicationId); long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, 
Zone.defaultZone(), Response.Status.OK, null, fullAppIdInUrl); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertOKResponse(Tenant tenant, ApplicationId applicationId) throws IOException { long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, Zone.defaultZone(), Response.Status.OK, null, true); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, boolean fullAppIdInUrl) throws IOException { String expectedResponse = "{\"message\":\"Application '" + applicationId + "' deleted\"}"; deleteAndAssertResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse) throws IOException { deleteAndAssertResponse(toUrlPath(applicationId, zone, true), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(String url, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse, Method method) throws IOException { ApplicationHandler handler = createApplicationHandler(); HttpResponse response = handler.handle(createTestRequest(url, method)); if (expectedStatus == 200) { assertHttpStatusCodeAndMessage(response, 200, expectedResponse); } else { HandlerTest.assertHttpStatusCodeErrorCodeAndMessage(response, expectedStatus, errorCode, expectedResponse); } } private void assertApplicationResponse(ApplicationId applicationId, Zone zone, long expectedGeneration, boolean fullAppIdInUrl, Version expectedVersion) throws IOException { assertApplicationResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedGeneration, 
expectedVersion); } private void assertSuspended(boolean expectedValue, ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/suspended"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, "{\"suspended\":" + expectedValue + "}"); } private String toUrlPath(ApplicationId application, Zone zone, boolean fullAppIdInUrl) { String url = "http: if (fullAppIdInUrl) url = url + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + application.instance().value(); return url; } private void assertApplicationResponse(String url, long expectedGeneration, Version expectedVersion) throws IOException { HttpResponse response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); String renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"generation\":" + expectedGeneration + ",\"applicationPackageFileReference\":\"./\"" + ",\"modelVersions\":[\"" + expectedVersion.toFullString() + "\"]}", renderedString); } private void assertApplicationExists(ApplicationId applicationId, Zone zone) throws IOException { String tenantName = applicationId.tenant().value(); String expected = "[\"http: tenantName + "/application/" + applicationId.application().value() + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + applicationId.instance().value() + "\"]"; ListApplicationsHandler listApplicationsHandler = new ListApplicationsHandler(ListApplicationsHandler.testOnlyContext(), tenantRepository, Zone.defaultZone()); ListApplicationsHandlerTest.assertResponse(listApplicationsHandler, "http: Response.Status.OK, expected, GET); } private void reindexing(ApplicationId application, Method method) throws IOException { reindexing(application, method, null); } private void 
reindex(ApplicationId application, String query) throws IOException { String reindexUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindex" + query; assertHttpStatusCodeAndMessage(createApplicationHandler().handle(createTestRequest(reindexUrl, POST)), 200, ""); } private void restart(ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/restart"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, POST)); assertHttpStatusCodeAndMessage(response, 200, ""); } private void converge(ApplicationId application, Zone zone) throws IOException { String convergeUrl = toUrlPath(application, zone, true) + "/serviceconverge"; HttpResponse response = createApplicationHandler().handle(createTestRequest(convergeUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, ""); } private HttpResponse fileDistributionStatus(ApplicationId application, Zone zone) { String restartUrl = toUrlPath(application, zone, true) + "/filedistributionstatus"; return createApplicationHandler().handle(createTestRequest(restartUrl, GET)); } private static class MockStateApiFactory implements ConfigConvergenceChecker.StateApiFactory { boolean createdApi = false; @Override public ConfigConvergenceChecker.StateApi createStateApi(Client client, URI serviceUri) { createdApi = true; return () -> { try { return new ObjectMapper().readTree("{\"config\":{\"generation\":1}}"); } catch (IOException e) { throw new RuntimeException(e); } }; } } private ApplicationHandler createApplicationHandler() { return createApplicationHandler(applicationRepository); } private ApplicationHandler createApplicationHandler(ApplicationRepository applicationRepository) { return new ApplicationHandler(ApplicationHandler.testOnlyContext(), Zone.defaultZone(), applicationRepository); } private PrepareParams prepareParams(ApplicationId applicationId) { return new PrepareParams.Builder().applicationId(applicationId).build(); } }
class ApplicationHandlerTest { private static final File testApp = new File("src/test/apps/app"); private final static TenantName mytenantName = TenantName.from("mytenant"); private final static ApplicationId myTenantApplicationId = ApplicationId.from(mytenantName, ApplicationName.defaultName(), InstanceName.defaultName()); private final static ApplicationId applicationId = ApplicationId.from(TenantName.defaultName(), ApplicationName.defaultName(), InstanceName.defaultName()); private final static MockTesterClient testerClient = new MockTesterClient(); private static final MockLogRetriever logRetriever = new MockLogRetriever(); private static final Version vespaVersion = Version.fromString("7.8.9"); private TenantRepository tenantRepository; private ApplicationRepository applicationRepository; private MockProvisioner provisioner; private OrchestratorMock orchestrator; private ManualClock clock; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() throws IOException { clock = new ManualClock(); List<ModelFactory> modelFactories = List.of(DeployTester.createModelFactory(vespaVersion)); ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder().getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder().getAbsolutePath()) .fileReferencesDir(temporaryFolder.newFolder().getAbsolutePath()) .build(); TestComponentRegistry componentRegistry = new TestComponentRegistry.Builder() .provisioner(provisioner) .modelFactoryRegistry(new ModelFactoryRegistry(modelFactories)) .configServerConfig(configserverConfig) .clock(clock) .build(); tenantRepository = new TenantRepository(componentRegistry); tenantRepository.addTenant(mytenantName); provisioner = new MockProvisioner(); orchestrator = new OrchestratorMock(); applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withProvisioner(provisioner) 
.withOrchestrator(orchestrator) .withClock(componentRegistry.getClock()) .withTesterClient(testerClient) .withLogRetriever(logRetriever) .withConfigserverConfig(configserverConfig) .build(); } @After public void shutdown() { tenantRepository.close(); } @Test public void testDelete() throws Exception { TenantName foobar = TenantName.from("foobar"); tenantRepository.addTenant(foobar); { applicationRepository.deploy(testApp, prepareParams(applicationId)); Tenant mytenant = applicationRepository.getTenant(applicationId); deleteAndAssertOKResponse(mytenant, applicationId); } { applicationRepository.deploy(testApp, prepareParams(applicationId)); deleteAndAssertOKResponseMocked(applicationId, true); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationId fooId = new ApplicationId.Builder() .tenant(foobar) .applicationName("foo") .instanceName("quux") .build(); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); applicationRepository.deploy(testApp, prepareParams2); assertApplicationExists(fooId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(fooId, true); assertApplicationExists(applicationId, Zone.defaultZone()); deleteAndAssertOKResponseMocked(applicationId, true); } { ApplicationId baliId = new ApplicationId.Builder() .tenant(mytenantName) .applicationName("bali") .instanceName("quux") .build(); PrepareParams prepareParamsBali = new PrepareParams.Builder().applicationId(baliId).build(); applicationRepository.deploy(testApp, prepareParamsBali); deleteAndAssertOKResponseMocked(baliId, true); } } @Test public void testDeleteNonExistent() throws Exception { deleteAndAssertResponse(myTenantApplicationId, Zone.defaultZone(), Response.Status.NOT_FOUND, HttpErrorResponse.errorCodes.NOT_FOUND, "Unable to delete mytenant.default.default: Not found"); } @Test public void testGet() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) 
.vespaVersion(vespaVersion) .build(); long sessionId = applicationRepository.deploy(testApp, prepareParams).sessionId(); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, true, vespaVersion); assertApplicationResponse(applicationId, Zone.defaultZone(), sessionId, false, vespaVersion); } @Test public void testGetQuota() throws Exception { PrepareParams prepareParams = new PrepareParams.Builder() .applicationId(applicationId) .vespaVersion(vespaVersion) .build(); applicationRepository.deploy(testApp, prepareParams).sessionId(); var url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/quota"; var response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); var renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"rate\":0.0}", renderedString); } @Test public void testReindex() throws Exception { ApplicationCuratorDatabase database = applicationRepository.getTenant(applicationId).getApplicationRepo().database(); applicationRepository.deploy(testApp, prepareParams(applicationId)); ApplicationReindexing expected = ApplicationReindexing.ready(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, ""); expected = expected.withReady(clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?cluster="); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); expected = expected.withReady(clock.instant()); reindex(applicationId, "?type=moo"); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo"); expected = expected.withReady("foo", 
clock.instant()) .withReady("boo", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); clock.advance(Duration.ofSeconds(1)); reindex(applicationId, "?cluster=foo,boo&type=bar,baz"); expected = expected.withReady("foo", "bar", clock.instant()) .withReady("foo", "baz", clock.instant()) .withReady("boo", "bar", clock.instant()) .withReady("boo", "baz", clock.instant()); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, DELETE); expected = expected.enabled(false); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); reindexing(applicationId, POST); expected = expected.enabled(true); assertEquals(expected, database.readReindexingStatus(applicationId).orElseThrow()); database.writeReindexingStatus(applicationId, expected.withPending("boo", "bar", 123L)); long now = clock.instant().toEpochMilli(); reindexing(applicationId, GET, "{" + " \"enabled\": true," + " \"status\": {" + " \"readyMillis\": " + (now - 2000) + " }," + " \"clusters\": [" + " {" + " \"name\": \"boo\"," + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": [" + " {" + " \"type\": \"bar\"," + " \"requiredGeneration\": 123" + " }" + " ]," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }," + " {" + " \"name\": \"foo\", " + " \"status\": {" + " \"readyMillis\": " + (now - 1000) + " }," + " \"pending\": []," + " \"ready\": [" + " {" + " \"type\": \"bar\"," + " \"readyMillis\": " + now + " }," + " {" + " \"type\": \"baz\"," + " \"readyMillis\": " + now + " }" + " ]" + " }" + " ]" + "}"); } @Test public void testRestart() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertFalse(provisioner.restarted()); restart(applicationId, Zone.defaultZone()); assertTrue(provisioner.restarted()); 
assertEquals(applicationId, provisioner.lastApplicationId()); } @Test public void testSuspended() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); assertSuspended(false, applicationId, Zone.defaultZone()); orchestrator.suspend(applicationId); assertSuspended(true, applicationId, Zone.defaultZone()); } @Test public void testConverge() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); converge(applicationId, Zone.defaultZone()); } @Test public void testClusterControllerStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); String host = "foo.yahoo.com"; String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/clustercontroller/" + host + "/status/v1/clusterName1"; HttpProxy mockHttpProxy = mock(HttpProxy.class); ApplicationRepository applicationRepository = new ApplicationRepository.Builder() .withTenantRepository(tenantRepository) .withHostProvisionerProvider(HostProvisionerProvider.empty()) .withOrchestrator(orchestrator) .withTesterClient(testerClient) .withHttpProxy(mockHttpProxy) .build(); ApplicationHandler mockHandler = createApplicationHandler(applicationRepository); when(mockHttpProxy.get(any(), eq(host), eq(CLUSTERCONTROLLER_CONTAINER.serviceName),eq("clustercontroller-status/v1/clusterName1"))) .thenReturn(new StaticResponse(200, "text/html", "<html>...</html>")); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertHttpStatusCodeAndMessage(response, 200, "text/html", "<html>...</html>"); } @Test public void testPutIsIllegal() throws IOException { assertNotAllowed(Method.PUT); } @Test public void testFileDistributionStatus() throws Exception { applicationRepository.deploy(testApp, prepareParams(applicationId)); Zone zone = Zone.defaultZone(); HttpResponse response = fileDistributionStatus(applicationId, zone); assertEquals(200, response.getStatus()); 
assertEquals("{\"hosts\":[{\"hostname\":\"mytesthost\",\"status\":\"UNKNOWN\",\"message\":\"error: Connection error(104)\",\"fileReferences\":[]}],\"status\":\"UNKNOWN\"}", getRenderedString(response)); ApplicationId unknown = new ApplicationId.Builder().applicationName("unknown").tenant("default").build(); HttpResponse responseForUnknown = fileDistributionStatus(unknown, zone); assertEquals(404, responseForUnknown.getStatus()); assertEquals("{\"error-code\":\"NOT_FOUND\",\"message\":\"Unknown application id 'default.unknown'\"}", getRenderedString(responseForUnknown)); } @Test public void testGetLogs() throws IOException { applicationRepository.deploy(new File("src/test/apps/app-logserver-with-container"), prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/logs?from=100&to=200"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log line", getRenderedString(response)); } @Test public void testTesterStatus() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/status"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("OK", getRenderedString(response)); } @Test public void testTesterGetLog() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/log?after=1234"; ApplicationHandler mockHandler = createApplicationHandler(); HttpResponse response = mockHandler.handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); assertEquals("log", getRenderedString(response)); } @Test public void testTesterStartTests() { 
applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/run/staging-test"; ApplicationHandler mockHandler = createApplicationHandler(); InputStream requestData = new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)); HttpRequest testRequest = createTestRequest(url, POST, requestData); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testTesterReady() { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/ready"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); } @Test public void testGetTestReport() throws IOException { applicationRepository.deploy(testApp, prepareParams(applicationId)); String url = toUrlPath(applicationId, Zone.defaultZone(), true) + "/tester/report"; ApplicationHandler mockHandler = createApplicationHandler(); HttpRequest testRequest = createTestRequest(url, GET); HttpResponse response = mockHandler.handle(testRequest); assertEquals(200, response.getStatus()); assertEquals("report", getRenderedString(response)); } private void assertNotAllowed(Method method) throws IOException { String url = "http: deleteAndAssertResponse(url, Response.Status.METHOD_NOT_ALLOWED, HttpErrorResponse.errorCodes.METHOD_NOT_ALLOWED, "{\"error-code\":\"METHOD_NOT_ALLOWED\",\"message\":\"Method '" + method + "' is not supported\"}", method); } private void deleteAndAssertOKResponseMocked(ApplicationId applicationId, boolean fullAppIdInUrl) throws IOException { Tenant tenant = applicationRepository.getTenant(applicationId); long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, 
Zone.defaultZone(), Response.Status.OK, null, fullAppIdInUrl); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertOKResponse(Tenant tenant, ApplicationId applicationId) throws IOException { long sessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId); deleteAndAssertResponse(applicationId, Zone.defaultZone(), Response.Status.OK, null, true); assertNull(tenant.getSessionRepository().getLocalSession(sessionId)); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, boolean fullAppIdInUrl) throws IOException { String expectedResponse = "{\"message\":\"Application '" + applicationId + "' deleted\"}"; deleteAndAssertResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(ApplicationId applicationId, Zone zone, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse) throws IOException { deleteAndAssertResponse(toUrlPath(applicationId, zone, true), expectedStatus, errorCode, expectedResponse, Method.DELETE); } private void deleteAndAssertResponse(String url, int expectedStatus, HttpErrorResponse.errorCodes errorCode, String expectedResponse, Method method) throws IOException { ApplicationHandler handler = createApplicationHandler(); HttpResponse response = handler.handle(createTestRequest(url, method)); if (expectedStatus == 200) { assertHttpStatusCodeAndMessage(response, 200, expectedResponse); } else { HandlerTest.assertHttpStatusCodeErrorCodeAndMessage(response, expectedStatus, errorCode, expectedResponse); } } private void assertApplicationResponse(ApplicationId applicationId, Zone zone, long expectedGeneration, boolean fullAppIdInUrl, Version expectedVersion) throws IOException { assertApplicationResponse(toUrlPath(applicationId, zone, fullAppIdInUrl), expectedGeneration, 
expectedVersion); } private void assertSuspended(boolean expectedValue, ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/suspended"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, "{\"suspended\":" + expectedValue + "}"); } private String toUrlPath(ApplicationId application, Zone zone, boolean fullAppIdInUrl) { String url = "http: if (fullAppIdInUrl) url = url + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + application.instance().value(); return url; } private void assertApplicationResponse(String url, long expectedGeneration, Version expectedVersion) throws IOException { HttpResponse response = createApplicationHandler().handle(createTestRequest(url, GET)); assertEquals(200, response.getStatus()); String renderedString = SessionHandlerTest.getRenderedString(response); assertEquals("{\"generation\":" + expectedGeneration + ",\"applicationPackageFileReference\":\"./\"" + ",\"modelVersions\":[\"" + expectedVersion.toFullString() + "\"]}", renderedString); } private void assertApplicationExists(ApplicationId applicationId, Zone zone) throws IOException { String tenantName = applicationId.tenant().value(); String expected = "[\"http: tenantName + "/application/" + applicationId.application().value() + "/environment/" + zone.environment().value() + "/region/" + zone.region().value() + "/instance/" + applicationId.instance().value() + "\"]"; ListApplicationsHandler listApplicationsHandler = new ListApplicationsHandler(ListApplicationsHandler.testOnlyContext(), tenantRepository, Zone.defaultZone()); ListApplicationsHandlerTest.assertResponse(listApplicationsHandler, "http: Response.Status.OK, expected, GET); } private void reindexing(ApplicationId application, Method method) throws IOException { reindexing(application, method, null); } private void 
reindex(ApplicationId application, String query) throws IOException { String reindexUrl = toUrlPath(application, Zone.defaultZone(), true) + "/reindex" + query; assertHttpStatusCodeAndMessage(createApplicationHandler().handle(createTestRequest(reindexUrl, POST)), 200, ""); } private void restart(ApplicationId application, Zone zone) throws IOException { String restartUrl = toUrlPath(application, zone, true) + "/restart"; HttpResponse response = createApplicationHandler().handle(createTestRequest(restartUrl, POST)); assertHttpStatusCodeAndMessage(response, 200, ""); } private void converge(ApplicationId application, Zone zone) throws IOException { String convergeUrl = toUrlPath(application, zone, true) + "/serviceconverge"; HttpResponse response = createApplicationHandler().handle(createTestRequest(convergeUrl, GET)); assertHttpStatusCodeAndMessage(response, 200, ""); } private HttpResponse fileDistributionStatus(ApplicationId application, Zone zone) { String restartUrl = toUrlPath(application, zone, true) + "/filedistributionstatus"; return createApplicationHandler().handle(createTestRequest(restartUrl, GET)); } private static class MockStateApiFactory implements ConfigConvergenceChecker.StateApiFactory { boolean createdApi = false; @Override public ConfigConvergenceChecker.StateApi createStateApi(Client client, URI serviceUri) { createdApi = true; return () -> { try { return new ObjectMapper().readTree("{\"config\":{\"generation\":1}}"); } catch (IOException e) { throw new RuntimeException(e); } }; } } private ApplicationHandler createApplicationHandler() { return createApplicationHandler(applicationRepository); } private ApplicationHandler createApplicationHandler(ApplicationRepository applicationRepository) { return new ApplicationHandler(ApplicationHandler.testOnlyContext(), Zone.defaultZone(), applicationRepository); } private PrepareParams prepareParams(ApplicationId applicationId) { return new PrepareParams.Builder().applicationId(applicationId).build(); } }
Consider simplifying to `(double)downNodes.size() / (double)activeNodes.size() <= 0.2`.
public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); }
return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); }
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage, flagSource); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of 
firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... 
inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); } public List<Node> getNodes(ApplicationId id, State ... 
inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g. 
containers on a Docker host) * @return List of node ACLs */ public List<NodeAcl> getNodeAcls(Node node, boolean children) { NodeList candidates = list(); if (children) { return candidates.childrenOf(node).asList().stream() .map(childNode -> getNodeAcl(childNode, candidates)) .collect(Collectors.toUnmodifiableList()); } return List.of(getNodeAcl(node, candidates)); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if ( ! node.allocation().isPresent()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().isEmpty()) illegal("Can not set host " + node + " ready. 
Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** Deactivate nodes owned by application guarded by given lock */ public void deactivate(ApplicationTransaction transaction) { deactivate(db.readNodes(transaction.application(), State.reserved, State.active), transaction); applications.remove(transaction); } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. 
*/ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested()); } /** Move nodes to the dirty state */ public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason)); } /** * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. * * @throws IllegalArgumentException if the node has hardware failure */ public Node setDirty(Node node, Agent agent, String reason) { return db.writeTo(State.dirty, node, agent, Optional.of(reason)); } public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList()); } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason)); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, node, agent, reason); } } /* * This method is used by the REST API to handle readying nodes for new allocations. 
For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. * * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. 
This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. 
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! 
filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (zone.getCloud().dynamicProvisioning()) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Acquires the appropriate lock for this node */ public 
Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage, flagSource); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of 
firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... 
inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); } public List<Node> getNodes(ApplicationId id, State ... 
inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g. 
containers on a Docker host) * @return List of node ACLs */ public List<NodeAcl> getNodeAcls(Node node, boolean children) { NodeList candidates = list(); if (children) { return candidates.childrenOf(node).asList().stream() .map(childNode -> getNodeAcl(childNode, candidates)) .collect(Collectors.toUnmodifiableList()); } return List.of(getNodeAcl(node, candidates)); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if ( ! node.allocation().isPresent()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().isEmpty()) illegal("Can not set host " + node + " ready. 
Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** Deactivate nodes owned by application guarded by given lock */ public void deactivate(ApplicationTransaction transaction) { deactivate(db.readNodes(transaction.application(), State.reserved, State.active), transaction); applications.remove(transaction); } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. 
*/ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested()); } /** Move nodes to the dirty state */ public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason)); } /** * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. * * @throws IllegalArgumentException if the node has hardware failure */ public Node setDirty(Node node, Agent agent, String reason) { return db.writeTo(State.dirty, node, agent, Optional.of(reason)); } public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList()); } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason)); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, node, agent, reason); } } /* * This method is used by the REST API to handle readying nodes for new allocations. 
For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. * * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. 
This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. 
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! 
filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (zone.getCloud().dynamicProvisioning()) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Acquires the appropriate lock for this node */ public 
Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
I find "working if not more than 20% fails" easier to understand than the simpler formulation, but that might just be me ...
public boolean isWorking() {
    // Returns whether this zone appears healthy. A zone with at most 5 active nodes is
    // always considered working — the down-ratio is meaningless on so small a sample.
    NodeList active = list(State.active);
    int activeCount = active.size();
    if (activeCount <= 5) return true;
    // Working iff no more than 20% of the active nodes are observed as down.
    // Equivalent to the original !(down/active > 0.2); activeCount > 5 here, so no division by zero.
    double downFraction = (double) active.down().size() / (double) activeCount;
    return downFraction <= 0.2;
}
return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
public boolean isWorking() {
    // Judges zone health from the fraction of active nodes currently down.
    NodeList activeNodes = list(State.active);
    // Too few active nodes to draw any conclusion: report the zone as working.
    if (activeNodes.size() <= 5) return true;
    NodeList downNodes = activeNodes.down();
    // Healthy unless strictly more than 20% of the active nodes are down
    // (same predicate as !(down/active > 0.2); size() > 5 guarantees a nonzero divisor).
    return (double) downNodes.size() / (double) activeNodes.size() <= 0.2;
}
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage, flagSource); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of 
firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... 
inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); } public List<Node> getNodes(ApplicationId id, State ... 
inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g. 
containers on a Docker host) * @return List of node ACLs */ public List<NodeAcl> getNodeAcls(Node node, boolean children) { NodeList candidates = list(); if (children) { return candidates.childrenOf(node).asList().stream() .map(childNode -> getNodeAcl(childNode, candidates)) .collect(Collectors.toUnmodifiableList()); } return List.of(getNodeAcl(node, candidates)); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if ( ! node.allocation().isPresent()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().isEmpty()) illegal("Can not set host " + node + " ready. 
Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** Deactivate nodes owned by application guarded by given lock */ public void deactivate(ApplicationTransaction transaction) { deactivate(db.readNodes(transaction.application(), State.reserved, State.active), transaction); applications.remove(transaction); } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. 
*/ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested()); } /** Move nodes to the dirty state */ public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason)); } /** * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. * * @throws IllegalArgumentException if the node has hardware failure */ public Node setDirty(Node node, Agent agent, String reason) { return db.writeTo(State.dirty, node, agent, Optional.of(reason)); } public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList()); } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason)); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, node, agent, reason); } } /* * This method is used by the REST API to handle readying nodes for new allocations. 
For tenant docker
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        if (node.state() != State.dirty)
            illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
        // Tenant docker containers are not recycled: remove them (force=true skips removal checks)
        return removeRecursively(node, true).get(0);
    }

    if (node.state() == State.ready) return node; // already ready: nothing to do

    // Refuse to ready a node whose parent host has hard failures
    Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
    List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
    if ( ! failureReasons.isEmpty())
        illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

    return setReady(List.of(node), agent, reason).get(0);
}

/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    return removeRecursively(node, false);
}

public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        requireRemovable(node, false, force);
        if (node.type().isHost()) {
            List<Node> removed = removeChildren(node, force);
            if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                db.removeNodes(List.of(node));
            else {
                // Statically provisioned tenant hosts are kept as 'deprovisioned' with IP config
                // wiped, so addNodes() can merge their history if the same host is added again
                node = node.with(IP.Config.EMPTY);
                move(node, State.deprovisioned, Agent.system, Optional.empty());
            }
            removed.add(node);
            return removed;
        }
        else {
            List<Node> removed = List.of(node);
            db.removeNodes(removed);
            return removed;
        }
    }
}

/**
 * Forgets a deprovisioned node.
This removes all traces of the node in the node repository. */
public void forget(Node node) {
    if (node.state() != State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    db.removeNodes(List.of(node));
}

// Checks removability of, then removes, all child nodes of the given node. Returns the removed children.
private List<Node> removeChildren(Node node, boolean force) {
    List<Node> children = list().childrenOf(node).asList();
    children.forEach(child -> requireRemovable(child, true, force));
    db.removeNodes(children);
    return new ArrayList<>(children);
}

/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 *  - Tenant node: node is unallocated
 *  - Host node: iff in state provisioned|failed|parked
 *  - Child node:
 *      If only removing the container node: node in state ready
 *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
 */
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
    if (force) return; // force bypasses every state check below

    if (node.type() == NodeType.tenant && node.allocation().isPresent())
        illegal(node + " is currently allocated and cannot be removed");

    if (!node.type().isHost() && !removingAsChild) {
        // Removing just the container node
        if (node.state() != State.ready)
            illegal(node + " can not be removed as it is not in the state " + State.ready);
    }
    else if (!node.type().isHost()) {
        // Removing a child together with its parent host: more source states are allowed
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
        if ( ! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
    else {
        // A host node
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
        if (! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
}

/**
 * Throws if given node cannot be breakfixed.
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
*
* @param lock already acquired lock
* @return the written node for convenience
*/
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}

/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
    // Partition the matching nodes by which lock guards them: unallocated nodes by the
    // 'unallocated' lock, allocated nodes by their owning application's lock.
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : db.readNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }

    // Apply the action under the proper lock, re-reading each node inside the lock since it may
    // have changed (skipped if it disappeared) after the unlocked read above.
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes) {
            Optional<Node> currentNode = db.readNode(node.hostname());
            if (currentNode.isEmpty()) continue;
            resultingNodes.add(action.apply(currentNode.get(), lock));
        }
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue()) {
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
    }
    return resultingNodes;
}

public boolean canAllocateTenantNodeTo(Node host) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

    // With dynamic provisioning, hosts in ready or provisioned state may also receive nodes
    if (zone.getCloud().dynamicProvisioning())
        return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
    else
        return host.state() == State.active;
}

/** Returns the time keeper of this system */
public Clock clock() { return clock; }

/** Returns the zone of this system */
public Zone zone() { return zone; }

/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }

/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }

/** Acquires the appropriate lock for this node: its owner's application lock if allocated, otherwise the unallocated lock */
public Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}

private void illegal(String message) { throw new IllegalArgumentException(message); }

}
class NodeRepository extends AbstractComponent {

    private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

    private final CuratorDatabaseClient db;
    private final Clock clock;
    private final Zone zone;
    private final NodeFlavors flavors;
    private final HostResourcesCalculator resourcesCalculator;
    private final NameResolver nameResolver;
    private final OsVersions osVersions;
    private final InfrastructureVersions infrastructureVersions;
    private final FirmwareChecks firmwareChecks;
    private final ContainerImages containerImages;
    private final JobControl jobControl;
    private final Applications applications;
    private final int spareCount;

    /**
     * Creates a node repository from a zookeeper provider.
     * This will use the system time to make time-sensitive decisions
     */
    @Inject
    public NodeRepository(NodeRepositoryConfig config,
                          NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Zone zone,
                          FlagSource flagSource) {
        this(flavors,
             provisionServiceProvider,
             curator,
             Clock.systemUTC(),
             zone,
             new DnsNameResolver(),
             DockerImage.fromString(config.containerImage())
                        .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())),
             flagSource,
             config.useCuratorClientCache(),
             // One spare is kept only in production zones without dynamic provisioning
             zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0,
             config.nodeCacheSize());
    }

    /**
     * Creates a node repository from a zookeeper provider and a clock instance
     * which will be used for time-sensitive decisions.
     */
    public NodeRepository(NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Clock clock,
                          Zone zone,
                          NameResolver nameResolver,
                          DockerImage containerImage,
                          FlagSource flagSource,
                          boolean useCuratorClientCache,
                          int spareCount,
                          long nodeCacheSize) {
        this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
        this.zone = zone;
        this.clock = clock;
        this.flavors = flavors;
        this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator();
        this.nameResolver = nameResolver;
        this.osVersions = new OsVersions(this);
        this.infrastructureVersions = new InfrastructureVersions(db);
        this.firmwareChecks = new FirmwareChecks(db, clock);
        this.containerImages = new ContainerImages(db, containerImage, flagSource);
        this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
        this.applications = new Applications(db);
        this.spareCount = spareCount;
        rewriteNodes(); // migrate stored nodes to the current serialization format at startup
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    private void rewriteNodes() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (State state : State.values()) {
            List<Node> nodes = db.readNodes(state);
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /** Returns the curator database client used by this */
    public CuratorDatabaseClient database() { return db; }

    /** @return The name resolver used to resolve hostname and ip addresses */
    public NameResolver nameResolver() { return nameResolver; }

    /** Returns the OS versions to use for nodes in this */
    public OsVersions osVersions() { return osVersions; }

    /** Returns the infrastructure versions to use for nodes in this */
    public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

    /** Returns the status of
firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }

/** Returns the docker images to use for nodes in this. */
public ContainerImages containerImages() { return containerImages; }

/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }

/** Returns this node repo's view of the applications deployed to it */
public Applications applications() { return applications; }

public NodeFlavors flavors() { return flavors; }

public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

/** The number of nodes we should ensure has free capacity for node failures whenever possible */
public int spareCount() { return spareCount; }

/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); }

/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 * @return the node, or empty if it was not found in any of the given states
 */
public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); }

/**
 * Finds and returns the nodes of the given type in any of the given states.
 *
 * @param type the node type to return
 * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
 * @return the node, or empty if it was not found in any of the given states
 */
public List<Node> getNodes(NodeType type, State ... inState) {
    // Type filtering happens in memory after reading nodes by state
    return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}

/** Returns a filterable list of nodes in this repository in any of the given states */
public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); }

/** Returns a filterable list of all nodes of an application */
public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); }

/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); }

/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); }

/** Returns a filterable list of load balancers belonging to given application */
public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); }

private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
    return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
}

public List<Node> getNodes(ApplicationId id, State ...
inState) { return db.readNodes(id, inState); }

public List<Node> getInactive() { return db.readNodes(State.inactive); }

public List<Node> getFailed() { return db.readNodes(State.failed); }

/**
 * Returns the ACL for the node (trusted nodes, networks and ports)
 */
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
    Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
    Set<Integer> trustedPorts = new LinkedHashSet<>();
    Set<String> trustedNetworks = new LinkedHashSet<>();

    // Port 22 (SSH) is trusted for every node type
    trustedPorts.add(22);

    // A node trusts its parent host, if any
    candidates.parentOf(node).ifPresent(trustedNodes::add);

    // An allocated node trusts the other nodes of its application, and the networks of its load balancers
    node.allocation().ifPresent(allocation -> {
        trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
        loadBalancers(allocation.owner()).asList().stream()
                                         .map(LoadBalancer::instance)
                                         .map(LoadBalancerInstance::networks)
                                         .forEach(trustedNetworks::addAll);
    });

    switch (node.type()) {
        case tenant:
            // Tenant nodes trust the config and proxy infrastructure, and the hosts of their application's nodes
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
            node.allocation().ifPresent(allocation ->
                    trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
            if (node.state() == State.ready) {
                // Ready tenant nodes additionally trust all other tenant nodes
                trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
            }
            break;

        case config:
            // Config servers trust all nodes
            trustedNodes.addAll(candidates.asList());
            trustedPorts.add(4443);
            break;

        case proxy:
            trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
            trustedPorts.add(443);
            trustedPorts.add(4080);
            trustedPorts.add(4443);
            break;

        case controller:
            trustedPorts.add(4443);
            trustedPorts.add(443);
            trustedPorts.add(80);
            break;

        default:
            illegal("Don't know how to create ACL for " + node + " of type " + node.type());
    }
    return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}

/**
 * Creates a list of node ACLs which identify which nodes the given node should trust
 *
 * @param node Node for which to generate ACLs
 * @param children Return ACLs for the children of the given node (e.g.
containers on a Docker host)
 * @return List of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = list();
    if (children) {
        return candidates.childrenOf(node).asList().stream()
                         .map(childNode -> getNodeAcl(childNode, candidates))
                         .collect(Collectors.toUnmodifiableList());
    }
    return List.of(getNodeAcl(node, candidates));
}

/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
// NOTE(review): the javadoc above is orphaned — no corresponding method follows it; consider removing it or restoring the method.

/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
    for (Node node : nodes) {
        if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
            illegal("Cannot add " + node + ": This is not a docker node");
        if ( ! node.allocation().isPresent())
            illegal("Cannot add " + node + ": Docker containers needs to be allocated");
        Optional<Node> existing = getNode(node.hostname());
        if (existing.isPresent())
            illegal("Cannot add " + node + ": A node with this name already exists (" +
                    existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                    node + ", " + node.history());
    }
    return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
}

/**
 * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesToAdd =  new ArrayList<>();
        List<Node> nodesToRemove = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            var node = nodes.get(i);

            // Reject duplicates within the argument list itself
            for (int j = 0; j < i; j++) {
                if (node.equals(nodes.get(j)))
                    illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
            }

            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent()) {
                if (existing.get().state() != State.deprovisioned)
                    illegal("Cannot add " + node + ": A node with this name already exists");
                // Carry over history, reports, fail count and firmware verification from the
                // deprovisioned node this one replaces, then remove the old entry
                node = node.with(existing.get().history());
                node = node.with(existing.get().reports());
                node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                if (existing.get().status().firmwareVerifiedAt().isPresent())
                    node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                nodesToRemove.add(existing.get());
            }
            nodesToAdd.add(node);
        }
        List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
        db.removeNodes(nodesToRemove);
        return resultingNodes;
    }
}

/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesWithResetFields = nodes.stream()
                .map(node -> {
                    if (node.state() != State.provisioned && node.state() != State.dirty)
                        illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                    if (node.type() == NodeType.host && node.ipConfig().pool().isEmpty())
                        illegal("Can not set host " + node + " ready. Its IP address pool is empty.");
                    // Clear any want-to-retire flags carried over from the previous lifecycle
                    return node.withWantToRetire(false, false, Agent.system, clock.instant());
                })
                .collect(Collectors.toList());
        return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}

public Node setReady(String hostname, Agent agent, String reason) {
    Node nodeToReady = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
    if (nodeToReady.state() == State.ready) return nodeToReady;
    return setReady(List.of(nodeToReady), agent, reason).get(0);
}

/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}

/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}

/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes = nodes.stream()
                .map(node -> node.with(node.allocation().get().removable(true)))
                .collect(Collectors.toList());
        write(removableNodes, lock);
    }
}

/** Deactivate nodes owned by application guarded by given lock */
public void deactivate(ApplicationTransaction transaction) {
    deactivate(db.readNodes(transaction.application(), State.reserved, State.active), transaction);
    applications.remove(transaction);
}

/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
*/
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
    return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested());
}

/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}

/**
 * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if the node has hardware failure
 */
public Node setDirty(Node node, Agent agent, String reason) {
    return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}

public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
    // For hosts, include all children; nodes that are already dirty are skipped
    List<Node> nodesToDirty =
            (nodeToDirty.type().isHost() ?
                    Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                    Stream.of(nodeToDirty))
            .filter(node -> node.state() != State.dirty)
            .collect(Collectors.toList());

    // Every affected node must be in a legal source state, or the whole operation is rejected
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
            .filter(node -> node.state() != State.provisioned)
            .filter(node -> node.state() != State.failed)
            .filter(node -> node.state() != State.parked)
            .filter(node -> node.state() != State.breakfixed)
            .map(Node::hostname)
            .collect(Collectors.toList());
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty +
                " are not in states [provisioned, failed, parked, breakfixed]");

    return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}

/**
 * Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
// The allocation is kept (keepAllocation = true) so the failed node can be reactivated later.
public Node fail(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.failed, agent, Optional.of(reason));
}

/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}

/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}

/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}

/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
// The allocation must be kept: move() rejects activating a node without one.
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.active, agent, Optional.of(reason));
}

/**
 * Moves a host to breakfixed state, removing any children.
*/
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));
    try (Mutex lock = lockUnallocated()) {
        requireBreakfixable(node);
        // Children go first so the breakfixed host does not leave orphans behind
        List<Node> removed = removeChildren(node, false);
        removed.add(move(node, State.breakfixed, agent, Optional.of(reason)));
        return removed;
    }
}

// Moves all children of hostname, then the (allocation-keeping) node itself, to toState.
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = list().childrenOf(hostname).asList().stream()
                             .map(child -> move(child, toState, agent, reason))
                             .collect(Collectors.toList());
    moved.add(move(hostname, true, toState, agent, reason));
    return moved;
}

// Resolves hostname to a node, optionally drops its allocation, then delegates to move(Node, ...).
private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
    if (!keepAllocation && node.allocation().isPresent()) {
        node = node.withoutAllocation();
    }
    return move(node, toState, agent, reason);
}

// Writes the node in its new state under the appropriate lock, refusing to activate a node
// without an allocation or one with the same cluster and index as an already-active node.
private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && node.allocation().isEmpty())
        illegal("Could not set " + node + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == State.active) {
            for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}

/*
 * This method is used by the REST API to handle readying nodes for new allocations.
For tenant docker
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
 */
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
        if (node.state() != State.dirty)
            illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
        // Containers are removed rather than readied; force=true skips the removability checks
        return removeRecursively(node, true).get(0);
    }

    if (node.state() == State.ready) return node; // no-op when already ready

    // A node on a hard-failed (parent) host must not be readied
    Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
    List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
    if ( ! failureReasons.isEmpty())
        illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

    return setReady(List.of(node), agent, reason).get(0);
}

/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
 */
public List<Node> removeRecursively(String hostname) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
    return removeRecursively(node, false);
}

public List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        requireRemovable(node, false, force);
        if (node.type().isHost()) {
            List<Node> removed = removeChildren(node, force);
            if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                db.removeNodes(List.of(node));
            else {
                // Statically provisioned tenant hosts are retained as 'deprovisioned' (IP config
                // wiped) so their history survives a later re-add via addNodes()
                node = node.with(IP.Config.EMPTY);
                move(node, State.deprovisioned, Agent.system, Optional.empty());
            }
            removed.add(node);
            return removed;
        }
        else {
            List<Node> removed = List.of(node);
            db.removeNodes(removed);
            return removed;
        }
    }
}

/**
 * Forgets a deprovisioned node.
This removes all traces of the node in the node repository. */
public void forget(Node node) {
    if (node.state() != State.deprovisioned)
        throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
    db.removeNodes(List.of(node));
}

// Verifies removability of, then removes, all child nodes of the given node. Returns the removed children.
private List<Node> removeChildren(Node node, boolean force) {
    List<Node> children = list().childrenOf(node).asList();
    children.forEach(child -> requireRemovable(child, true, force));
    db.removeNodes(children);
    return new ArrayList<>(children);
}

/**
 * Throws if the given node cannot be removed. Removal is allowed if:
 *  - Tenant node: node is unallocated
 *  - Host node: iff in state provisioned|failed|parked
 *  - Child node:
 *      If only removing the container node: node in state ready
 *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
 */
private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
    if (force) return; // force skips every check below

    if (node.type() == NodeType.tenant && node.allocation().isPresent())
        illegal(node + " is currently allocated and cannot be removed");

    if (!node.type().isHost() && !removingAsChild) {
        // Removing just the container node
        if (node.state() != State.ready)
            illegal(node + " can not be removed as it is not in the state " + State.ready);
    }
    else if (!node.type().isHost()) {
        // Removing a child node along with its parent host
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
        if ( ! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
    else {
        // A host node
        Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
        if (! legalStates.contains(node.state()))
            illegal(node + " can not be removed as it is not in the states " + legalStates);
    }
}

/**
 * Throws if given node cannot be breakfixed.
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! 
filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (zone.getCloud().dynamicProvisioning()) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Acquires the appropriate lock for this node */ public 
Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
Sure
/** Reads the container addresses stored under the 'containers' key, or an empty list when none are present. */
private List<Address> addressesFromSlime(Inspector object) {
    Inspector containers = object.field(containersKey);
    int containerCount = containers.children();
    if (containerCount == 0) return List.of();

    List<Address> result = new ArrayList<>(containerCount);
    containers.traverse((ArrayTraverser) (index, container) ->
            result.add(new Address(container.field(containerHostnameKey).asString())));
    return result;
}
}
/** Reads the container addresses stored under the 'containers' key; an absent field yields an empty list. */
private List<Address> addressesFromSlime(Inspector object) {
    var containerEntries = SlimeUtils.entriesStream(object.field(containersKey));
    return containerEntries.map(entry -> entry.field(containerHostnameKey))
                           .map(hostnameField -> new Address(hostnameField.asString()))
                           .collect(Collectors.toList());
}
/**
 * Serializes {@link Node} instances to and from a JSON representation.
 *
 * Deserialization results are cached, keyed by a hash of the node state and the raw
 * JSON bytes, to avoid re-parsing identical data — see {@link #fromJson}.
 */
class NodeSerializer {

    /** The configured node flavors */
    private final NodeFlavors flavors;

    // Field names of the serialized form.
    // NOTE(review): these constants define the stored format — renaming a value breaks
    // reading of previously written data.
    private static final String hostnameKey = "hostname";
    private static final String ipAddressesKey = "ipAddresses";
    private static final String ipAddressPoolKey = "additionalIpAddresses";
    private static final String containersKey = "containers";
    private static final String containerHostnameKey = "hostname";
    private static final String idKey = "openStackId";
    private static final String parentHostnameKey = "parentHostname";
    private static final String historyKey = "history";
    private static final String instanceKey = "instance";
    private static final String rebootGenerationKey = "rebootGeneration";
    private static final String currentRebootGenerationKey = "currentRebootGeneration";
    private static final String vespaVersionKey = "vespaVersion";
    private static final String currentContainerImageKey = "currentDockerImage";
    private static final String failCountKey = "failCount";
    private static final String nodeTypeKey = "type";
    private static final String wantToRetireKey = "wantToRetire";
    private static final String wantToDeprovisionKey = "wantToDeprovision";
    private static final String osVersionKey = "osVersion";
    private static final String wantedOsVersionKey = "wantedOsVersion";
    private static final String firmwareCheckKey = "firmwareCheck";
    private static final String reportsKey = "reports";
    private static final String modelNameKey = "modelName";
    private static final String reservedToKey = "reservedTo";
    private static final String exclusiveToKey = "exclusiveTo";
    private static final String switchHostnameKey = "switchHostname";

    // Flavor fields
    private static final String flavorKey = "flavor";
    private static final String resourcesKey = "resources";
    private static final String diskKey = "disk";

    // Allocation fields
    private static final String tenantIdKey = "tenantId";
    private static final String applicationIdKey = "applicationId";
    private static final String instanceIdKey = "instanceId";
    private static final String serviceIdKey = "serviceId";
    private static final String requestedResourcesKey = "requestedResources";
    private static final String restartGenerationKey = "restartGeneration";
    private static final String currentRestartGenerationKey = "currentRestartGeneration";
    private static final String removableKey = "removable";
    private static final String wantedVespaVersionKey = "wantedVespaVersion";
    private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo";

    // History event fields
    private static final String historyEventTypeKey = "type";
    private static final String atKey = "at";
    private static final String agentKey = "agent";

    // Network port fields
    private static final String networkPortsKey = "networkPorts";

    /** Cache of deserialized nodes, keyed by a hash of (state, raw json) — see fromJson() */
    private final Cache<Long, Node> cache;

    public NodeSerializer(NodeFlavors flavors, long cacheSize) {
        this.flavors = flavors;
        this.cache = CacheBuilder.newBuilder().maximumSize(cacheSize).recordStats().build();
    }

    /** Serializes the given node to JSON bytes. */
    public byte[] toJson(Node node) {
        try {
            Slime slime = new Slime();
            toSlime(node, slime.setObject());
            return SlimeUtils.toJsonBytes(slime);
        }
        catch (IOException e) {
            throw new RuntimeException("Serialization of " + node + " to json failed", e);
        }
    }

    /** Returns cache statistics for this serializer */
    public CacheStats cacheStats() {
        var stats = cache.stats();
        return new CacheStats(stats.hitRate(), stats.evictionCount(), cache.size());
    }

    /** Writes all fields of the given node to the given object cursor. */
    private void toSlime(Node node, Cursor object) {
        object.setString(hostnameKey, node.hostname());
        toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey));
        toSlime(node.ipConfig().pool().getIpSet(), object.setArray(ipAddressPoolKey));
        toSlime(node.ipConfig().pool().getAddressList(), object);
        object.setString(idKey, node.id());
        node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
        toSlime(node.flavor(), object);
        object.setLong(rebootGenerationKey, node.status().reboot().wanted());
        object.setLong(currentRebootGenerationKey, node.status().reboot().current());
        node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
        node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString()));
        object.setLong(failCountKey, node.status().failCount());
        object.setBool(wantToRetireKey, node.status().wantToRetire());
        object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
        node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
        toSlime(node.history(), object.setArray(historyKey));
        object.setString(nodeTypeKey, toString(node.type()));
        node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString()));
        node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString()));
        node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli()));
        node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname));
        node.reports().toSlime(object, reportsKey);
        node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
        node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
        node.exclusiveTo().ifPresent(applicationId -> object.setString(exclusiveToKey, applicationId.serializedForm()));
    }

    /**
     * Writes the flavor. Configured flavors are stored by name (plus any disk override);
     * otherwise the resources are stored directly.
     */
    private void toSlime(Flavor flavor, Cursor object) {
        if (flavor.isConfigured()) {
            object.setString(flavorKey, flavor.name());
            if (flavor.flavorOverrides().isPresent()) {
                Cursor resourcesObject = object.setObject(resourcesKey);
                flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb));
            }
        }
        else {
            NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey));
        }
    }

    /** Writes all fields of the given allocation to the given object cursor. */
    private void toSlime(Allocation allocation, Cursor object) {
        NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey));
        object.setString(tenantIdKey, allocation.owner().tenant().value());
        object.setString(applicationIdKey, allocation.owner().application().value());
        object.setString(instanceIdKey, allocation.owner().instance().value());
        object.setString(serviceIdKey, allocation.membership().stringValue());
        object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
        object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
        object.setBool(removableKey, allocation.isRemovable());
        object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString());
        allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged()));
        allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey)));
    }

    /** Writes each history event as an object in the given array. */
    private void toSlime(History history, Cursor array) {
        for (History.Event event : history.events())
            toSlime(event, array.addObject());
    }

    /** Writes one history event: type, timestamp (epoch millis) and agent. */
    private void toSlime(History.Event event, Cursor object) {
        object.setString(historyEventTypeKey, toString(event.type()));
        object.setLong(atKey, event.at().toEpochMilli());
        object.setString(agentKey, toString(event.agent()));
    }

    /** Writes IP addresses in natural (sorted) order so the serialized form is stable. */
    private void toSlime(Set<String> ipAddresses, Cursor array) {
        ipAddresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString);
    }

    /** Writes container addresses under the 'containers' key; writes nothing when the list is empty. */
    private void toSlime(List<Address> addresses, Cursor object) {
        if (addresses.isEmpty()) return;
        Cursor addressCursor = object.setArray(containersKey);
        addresses.forEach(address -> {
            addressCursor.addObject().setString(containerHostnameKey, address.hostname());
        });
    }

    /**
     * Deserializes a node from JSON bytes, via a cache keyed by a SipHash-2-4 of the
     * state name and the raw bytes (the state is part of the key because it is not
     * stored in the JSON itself but passed in by the caller).
     */
    public Node fromJson(Node.State state, byte[] data) {
        var key = Hashing.sipHash24().newHasher()
                         .putString(state.name(), StandardCharsets.UTF_8)
                         .putBytes(data).hash()
                         .asLong();
        try {
            return cache.get(key, () -> nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get()));
        } catch (ExecutionException e) {
            throw new UncheckedExecutionException(e);
        }
    }

    /** Builds a Node from its deserialized slime representation. */
    private Node nodeFromSlime(Node.State state, Inspector object) {
        Flavor flavor = flavorFromSlime(object);
        return new Node(object.field(idKey).asString(),
                        new IP.Config(ipAddressesFromSlime(object, ipAddressesKey),
                                      ipAddressesFromSlime(object, ipAddressPoolKey),
                                      addressesFromSlime(object)),
                        object.field(hostnameKey).asString(),
                        parentHostnameFromSlime(object),
                        flavor,
                        statusFromSlime(object),
                        state,
                        allocationFromSlime(flavor.resources(), object.field(instanceKey)),
                        historyFromSlime(object.field(historyKey)),
                        nodeTypeFromString(object.field(nodeTypeKey).asString()),
                        Reports.fromSlime(object.field(reportsKey)),
                        modelNameFromSlime(object),
                        reservedToFromSlime(object.field(reservedToKey)),
                        exclusiveToFromSlime(object.field(exclusiveToKey)),
                        switchHostnameFromSlime(object.field(switchHostnameKey)));
    }

    /** Reads the node status fields. */
    private Status statusFromSlime(Inspector object) {
        return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
                          versionFromSlime(object.field(vespaVersionKey)),
                          containerImageFromSlime(object.field(currentContainerImageKey)),
                          (int) object.field(failCountKey).asLong(),
                          object.field(wantToRetireKey).asBool(),
                          object.field(wantToDeprovisionKey).asBool(),
                          new OsVersion(versionFromSlime(object.field(osVersionKey)),
                                        versionFromSlime(object.field(wantedOsVersionKey))),
                          instantFromSlime(object.field(firmwareCheckKey)));
    }

    /** Reads the optional switch hostname. */
    private Optional<String> switchHostnameFromSlime(Inspector field) {
        if (!field.valid()) return Optional.empty();
        return Optional.of(field.asString());
    }

    /**
     * Reads the flavor: by configured name (with optional disk override) when the
     * 'flavor' field is present, otherwise directly from the stored resources.
     */
    private Flavor flavorFromSlime(Inspector object) {
        Inspector resources = object.field(resourcesKey);

        if (object.field(flavorKey).valid()) {
            Flavor flavor = flavors.getFlavorOrThrow(object.field(flavorKey).asString());
            if (!resources.valid()) return flavor;
            return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble()));
        }
        else {
            return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources));
        }
    }

    /**
     * Reads the optional allocation. When no requested resources were stored, the
     * assigned (flavor) resources are used as the requested resources.
     */
    private Optional<Allocation> allocationFromSlime(NodeResources assignedResources, Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new Allocation(applicationIdFromSlime(object),
                                          clusterMembershipFromSlime(object),
                                          NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey))
                                                                 .orElse(assignedResources),
                                          generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
                                          object.field(removableKey).asBool(),
                                          NetworkPortsSerializer.fromSlime(object.field(networkPortsKey))));
    }

    /** Reads the owning application id from the tenant/application/instance fields. */
    private ApplicationId applicationIdFromSlime(Inspector object) {
        return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
                                  ApplicationName.from(object.field(applicationIdKey).asString()),
                                  InstanceName.from(object.field(instanceIdKey).asString()));
    }

    /** Reads the history, silently dropping events whose type maps to null in eventFromSlime(). */
    private History historyFromSlime(Inspector array) {
        List<History.Event> events = new ArrayList<>();
        array.traverse((ArrayTraverser) (int i, Inspector item) -> {
            History.Event event = eventFromSlime(item);
            if (event != null)
                events.add(event);
        });
        return new History(events);
    }

    /**
     * Reads one history event.
     * NOTE(review): the null-check below suggests unknown event types were meant to be
     * skipped, but eventTypeFromString() currently throws instead of returning null for
     * unknown types — confirm intended behavior before relying on either.
     */
    private History.Event eventFromSlime(Inspector object) {
        History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
        if (type == null) return null;
        Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
        Agent agent = eventAgentFromSlime(object.field(agentKey));
        return new History.Event(type, agent, at);
    }

    /** Reads a wanted/current generation pair. */
    private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
        Inspector current = object.field(currentField);
        return new Generation(object.field(wantedField).asLong(), current.asLong());
    }

    /** Reads the cluster membership from the service id, wanted version and image repo fields. */
    private ClusterMembership clusterMembershipFromSlime(Inspector object) {
        return ClusterMembership.from(object.field(serviceIdKey).asString(),
                                      versionFromSlime(object.field(wantedVespaVersionKey)).get(),
                                      containerImageRepoFromSlime(object.field(wantedContainerImageRepoKey)));
    }

    /** Reads an optional version string. */
    private Optional<Version> versionFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(Version.fromString(object.asString()));
    }

    /** Reads an optional container image repo; an empty string also counts as absent. */
    private Optional<DockerImage> containerImageRepoFromSlime(Inspector object) {
        if ( ! object.valid() || object.asString().isEmpty()) return Optional.empty();
        return Optional.of(DockerImage.fromString(object.asString()));
    }

    /** Reads an optional container image. */
    private Optional<DockerImage> containerImageFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(DockerImage.fromString(object.asString()));
    }

    /** Reads an optional instant stored as epoch millis. */
    private Optional<Instant> instantFromSlime(Inspector object) {
        if ( ! object.valid())
            return Optional.empty();
        return Optional.of(Instant.ofEpochMilli(object.asLong()));
    }

    /** Reads the optional parent hostname. */
    private Optional<String> parentHostnameFromSlime(Inspector object) {
        if (object.field(parentHostnameKey).valid())
            return Optional.of(object.field(parentHostnameKey).asString());
        else
            return Optional.empty();
    }

    /** Reads the string array under the given key into an immutable set. */
    private Set<String> ipAddressesFromSlime(Inspector object, String key) {
        ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder();
        object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
        return ipAddresses.build();
    }

    /** Reads the optional model name. */
    private Optional<String> modelNameFromSlime(Inspector object) {
        if (object.field(modelNameKey).valid()) {
            return Optional.of(object.field(modelNameKey).asString());
        }
        return Optional.empty();
    }

    /** Reads the optional tenant this node is reserved to; the stored value must be a string. */
    private Optional<TenantName> reservedToFromSlime(Inspector object) {
        if (! object.valid()) return Optional.empty();
        if (object.type() != Type.STRING)
            throw new IllegalArgumentException("Expected 'reservedTo' to be a string but is " + object);
        return Optional.of(TenantName.from(object.asString()));
    }

    /** Reads the optional application this node is exclusive to; the stored value must be a string. */
    private Optional<ApplicationId> exclusiveToFromSlime(Inspector object) {
        if (! object.valid()) return Optional.empty();
        if (object.type() != Type.STRING)
            throw new IllegalArgumentException("Expected 'exclusiveTo' to be a string but is " + object);
        return Optional.of(ApplicationId.fromSerializedForm(object.asString()));
    }

    /**
     * Returns the event type, or null if this event type should be ignored.
     * NOTE(review): the javadoc above is inherited from the original — the implementation
     * never returns null; it throws for unknown types. Confirm which contract is intended.
     */
    private History.Event.Type eventTypeFromString(String eventTypeString) {
        switch (eventTypeString) {
            case "provisioned" : return History.Event.Type.provisioned;
            case "deprovisioned" : return History.Event.Type.deprovisioned;
            case "readied" : return History.Event.Type.readied;
            case "reserved" : return History.Event.Type.reserved;
            case "activated" : return History.Event.Type.activated;
            case "wantToRetire": return History.Event.Type.wantToRetire;
            case "retired" : return History.Event.Type.retired;
            case "deactivated" : return History.Event.Type.deactivated;
            case "parked" : return History.Event.Type.parked;
            case "failed" : return History.Event.Type.failed;
            case "deallocated" : return History.Event.Type.deallocated;
            case "down" : return History.Event.Type.down;
            case "requested" : return History.Event.Type.requested;
            case "rebooted" : return History.Event.Type.rebooted;
            case "osUpgraded" : return History.Event.Type.osUpgraded;
            case "firmwareVerified" : return History.Event.Type.firmwareVerified;
            case "breakfixed" : return History.Event.Type.breakfixed;
        }
        throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
    }

    /** Serializes an event type; must stay in sync with eventTypeFromString(). */
    private String toString(History.Event.Type nodeEventType) {
        switch (nodeEventType) {
            case provisioned : return "provisioned";
            case deprovisioned : return "deprovisioned";
            case readied : return "readied";
            case reserved : return "reserved";
            case activated : return "activated";
            case wantToRetire: return "wantToRetire";
            case retired : return "retired";
            case deactivated : return "deactivated";
            case parked : return "parked";
            case failed : return "failed";
            case deallocated : return "deallocated";
            case down : return "down";
            case requested: return "requested";
            case rebooted: return "rebooted";
            case osUpgraded: return "osUpgraded";
            case firmwareVerified: return "firmwareVerified";
            case breakfixed: return "breakfixed";
        }
        throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
    }

    /** Deserializes an event agent; throws for unknown agents. */
    private Agent eventAgentFromSlime(Inspector eventAgentField) {
        switch (eventAgentField.asString()) {
            case "operator" : return Agent.operator;
            case "application" : return Agent.application;
            case "system" : return Agent.system;
            case "DirtyExpirer" : return Agent.DirtyExpirer;
            case "DynamicProvisioningMaintainer" : return Agent.DynamicProvisioningMaintainer;
            case "FailedExpirer" : return Agent.FailedExpirer;
            case "InactiveExpirer" : return Agent.InactiveExpirer;
            case "NodeFailer" : return Agent.NodeFailer;
            case "NodeHealthTracker" : return Agent.NodeHealthTracker;
            case "ProvisionedExpirer" : return Agent.ProvisionedExpirer;
            case "Rebalancer" : return Agent.Rebalancer;
            case "ReservationExpirer" : return Agent.ReservationExpirer;
            case "RetiringUpgrader" : return Agent.RetiringUpgrader;
            case "SpareCapacityMaintainer": return Agent.SpareCapacityMaintainer;
            case "SwitchRebalancer": return Agent.SwitchRebalancer;
        }
        throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
    }

    /** Serializes an agent; must stay in sync with eventAgentFromSlime(). */
    private String toString(Agent agent) {
        switch (agent) {
            case operator : return "operator";
            case application : return "application";
            case system : return "system";
            case DirtyExpirer : return "DirtyExpirer";
            case DynamicProvisioningMaintainer : return "DynamicProvisioningMaintainer";
            case FailedExpirer : return "FailedExpirer";
            case InactiveExpirer : return "InactiveExpirer";
            case NodeFailer : return "NodeFailer";
            case NodeHealthTracker: return "NodeHealthTracker";
            case ProvisionedExpirer : return "ProvisionedExpirer";
            case Rebalancer : return "Rebalancer";
            case ReservationExpirer : return "ReservationExpirer";
            case RetiringUpgrader: return "RetiringUpgrader";
            case SpareCapacityMaintainer: return "SpareCapacityMaintainer";
            case SwitchRebalancer: return "SwitchRebalancer";
        }
        throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
    }

    /** Deserializes a node type; throws for unknown types. */
    static NodeType nodeTypeFromString(String typeString) {
        switch (typeString) {
            case "tenant": return NodeType.tenant;
            case "host": return NodeType.host;
            case "proxy": return NodeType.proxy;
            case "proxyhost": return NodeType.proxyhost;
            case "config": return NodeType.config;
            case "confighost": return NodeType.confighost;
            case "controller": return NodeType.controller;
            case "controllerhost": return NodeType.controllerhost;
            case "devhost": return NodeType.devhost;
            default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
        }
    }

    /** Serializes a node type; must stay in sync with nodeTypeFromString(). */
    static String toString(NodeType type) {
        switch (type) {
            case tenant: return "tenant";
            case host: return "host";
            case proxy: return "proxy";
            case proxyhost: return "proxyhost";
            case config: return "config";
            case confighost: return "confighost";
            case controller: return "controller";
            case controllerhost: return "controllerhost";
            case devhost: return "devhost";
        }
        throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
    }

}
class NodeSerializer { /** The configured node flavors */ private final NodeFlavors flavors; private static final String hostnameKey = "hostname"; private static final String ipAddressesKey = "ipAddresses"; private static final String ipAddressPoolKey = "additionalIpAddresses"; private static final String containersKey = "containers"; private static final String containerHostnameKey = "hostname"; private static final String idKey = "openStackId"; private static final String parentHostnameKey = "parentHostname"; private static final String historyKey = "history"; private static final String instanceKey = "instance"; private static final String rebootGenerationKey = "rebootGeneration"; private static final String currentRebootGenerationKey = "currentRebootGeneration"; private static final String vespaVersionKey = "vespaVersion"; private static final String currentContainerImageKey = "currentDockerImage"; private static final String failCountKey = "failCount"; private static final String nodeTypeKey = "type"; private static final String wantToRetireKey = "wantToRetire"; private static final String wantToDeprovisionKey = "wantToDeprovision"; private static final String osVersionKey = "osVersion"; private static final String wantedOsVersionKey = "wantedOsVersion"; private static final String firmwareCheckKey = "firmwareCheck"; private static final String reportsKey = "reports"; private static final String modelNameKey = "modelName"; private static final String reservedToKey = "reservedTo"; private static final String exclusiveToKey = "exclusiveTo"; private static final String switchHostnameKey = "switchHostname"; private static final String flavorKey = "flavor"; private static final String resourcesKey = "resources"; private static final String diskKey = "disk"; private static final String tenantIdKey = "tenantId"; private static final String applicationIdKey = "applicationId"; private static final String instanceIdKey = "instanceId"; private static final String 
serviceIdKey = "serviceId"; private static final String requestedResourcesKey = "requestedResources"; private static final String restartGenerationKey = "restartGeneration"; private static final String currentRestartGenerationKey = "currentRestartGeneration"; private static final String removableKey = "removable"; private static final String wantedVespaVersionKey = "wantedVespaVersion"; private static final String wantedContainerImageRepoKey = "wantedDockerImageRepo"; private static final String historyEventTypeKey = "type"; private static final String atKey = "at"; private static final String agentKey = "agent"; private static final String networkPortsKey = "networkPorts"; private final Cache<Long, Node> cache; public NodeSerializer(NodeFlavors flavors, long cacheSize) { this.flavors = flavors; this.cache = CacheBuilder.newBuilder().maximumSize(cacheSize).recordStats().build(); } public byte[] toJson(Node node) { try { Slime slime = new Slime(); toSlime(node, slime.setObject()); return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new RuntimeException("Serialization of " + node + " to json failed", e); } } /** Returns cache statistics for this serializer */ public CacheStats cacheStats() { var stats = cache.stats(); return new CacheStats(stats.hitRate(), stats.evictionCount(), cache.size()); } private void toSlime(Node node, Cursor object) { object.setString(hostnameKey, node.hostname()); toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey)); toSlime(node.ipConfig().pool().getIpSet(), object.setArray(ipAddressPoolKey)); toSlime(node.ipConfig().pool().getAddressList(), object); object.setString(idKey, node.id()); node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname)); toSlime(node.flavor(), object); object.setLong(rebootGenerationKey, node.status().reboot().wanted()); object.setLong(currentRebootGenerationKey, node.status().reboot().current()); node.status().vespaVersion().ifPresent(version -> 
object.setString(vespaVersionKey, version.toString())); node.status().containerImage().ifPresent(image -> object.setString(currentContainerImageKey, image.asString())); object.setLong(failCountKey, node.status().failCount()); object.setBool(wantToRetireKey, node.status().wantToRetire()); object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision()); node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey))); toSlime(node.history(), object.setArray(historyKey)); object.setString(nodeTypeKey, toString(node.type())); node.status().osVersion().current().ifPresent(version -> object.setString(osVersionKey, version.toString())); node.status().osVersion().wanted().ifPresent(version -> object.setString(wantedOsVersionKey, version.toFullString())); node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli())); node.switchHostname().ifPresent(switchHostname -> object.setString(switchHostnameKey, switchHostname)); node.reports().toSlime(object, reportsKey); node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName)); node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value())); node.exclusiveTo().ifPresent(applicationId -> object.setString(exclusiveToKey, applicationId.serializedForm())); } private void toSlime(Flavor flavor, Cursor object) { if (flavor.isConfigured()) { object.setString(flavorKey, flavor.name()); if (flavor.flavorOverrides().isPresent()) { Cursor resourcesObject = object.setObject(resourcesKey); flavor.flavorOverrides().get().diskGb().ifPresent(diskGb -> resourcesObject.setDouble(diskKey, diskGb)); } } else { NodeResourcesSerializer.toSlime(flavor.resources(), object.setObject(resourcesKey)); } } private void toSlime(Allocation allocation, Cursor object) { NodeResourcesSerializer.toSlime(allocation.requestedResources(), object.setObject(requestedResourcesKey)); object.setString(tenantIdKey, 
allocation.owner().tenant().value()); object.setString(applicationIdKey, allocation.owner().application().value()); object.setString(instanceIdKey, allocation.owner().instance().value()); object.setString(serviceIdKey, allocation.membership().stringValue()); object.setLong(restartGenerationKey, allocation.restartGeneration().wanted()); object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current()); object.setBool(removableKey, allocation.isRemovable()); object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString()); allocation.membership().cluster().dockerImageRepo().ifPresent(repo -> object.setString(wantedContainerImageRepoKey, repo.untagged())); allocation.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, object.setArray(networkPortsKey))); } private void toSlime(History history, Cursor array) { for (History.Event event : history.events()) toSlime(event, array.addObject()); } private void toSlime(History.Event event, Cursor object) { object.setString(historyEventTypeKey, toString(event.type())); object.setLong(atKey, event.at().toEpochMilli()); object.setString(agentKey, toString(event.agent())); } private void toSlime(Set<String> ipAddresses, Cursor array) { ipAddresses.stream().map(IP::parse).sorted(IP.NATURAL_ORDER).map(IP::asString).forEach(array::addString); } private void toSlime(List<Address> addresses, Cursor object) { if (addresses.isEmpty()) return; Cursor addressCursor = object.setArray(containersKey); addresses.forEach(address -> { addressCursor.addObject().setString(containerHostnameKey, address.hostname()); }); } public Node fromJson(Node.State state, byte[] data) { var key = Hashing.sipHash24().newHasher() .putString(state.name(), StandardCharsets.UTF_8) .putBytes(data).hash() .asLong(); try { return cache.get(key, () -> nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get())); } catch (ExecutionException e) { throw new UncheckedExecutionException(e); } } private 
Node nodeFromSlime(Node.State state, Inspector object) { Flavor flavor = flavorFromSlime(object); return new Node(object.field(idKey).asString(), new IP.Config(ipAddressesFromSlime(object, ipAddressesKey), ipAddressesFromSlime(object, ipAddressPoolKey), addressesFromSlime(object)), object.field(hostnameKey).asString(), parentHostnameFromSlime(object), flavor, statusFromSlime(object), state, allocationFromSlime(flavor.resources(), object.field(instanceKey)), historyFromSlime(object.field(historyKey)), nodeTypeFromString(object.field(nodeTypeKey).asString()), Reports.fromSlime(object.field(reportsKey)), modelNameFromSlime(object), reservedToFromSlime(object.field(reservedToKey)), exclusiveToFromSlime(object.field(exclusiveToKey)), switchHostnameFromSlime(object.field(switchHostnameKey))); } private Status statusFromSlime(Inspector object) { return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey), versionFromSlime(object.field(vespaVersionKey)), containerImageFromSlime(object.field(currentContainerImageKey)), (int) object.field(failCountKey).asLong(), object.field(wantToRetireKey).asBool(), object.field(wantToDeprovisionKey).asBool(), new OsVersion(versionFromSlime(object.field(osVersionKey)), versionFromSlime(object.field(wantedOsVersionKey))), instantFromSlime(object.field(firmwareCheckKey))); } private Optional<String> switchHostnameFromSlime(Inspector field) { if (!field.valid()) return Optional.empty(); return Optional.of(field.asString()); } private Flavor flavorFromSlime(Inspector object) { Inspector resources = object.field(resourcesKey); if (object.field(flavorKey).valid()) { Flavor flavor = flavors.getFlavorOrThrow(object.field(flavorKey).asString()); if (!resources.valid()) return flavor; return flavor.with(FlavorOverrides.ofDisk(resources.field(diskKey).asDouble())); } else { return new Flavor(NodeResourcesSerializer.resourcesFromSlime(resources)); } } private Optional<Allocation> allocationFromSlime(NodeResources 
assignedResources, Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new Allocation(applicationIdFromSlime(object), clusterMembershipFromSlime(object), NodeResourcesSerializer.optionalResourcesFromSlime(object.field(requestedResourcesKey)) .orElse(assignedResources), generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey), object.field(removableKey).asBool(), NetworkPortsSerializer.fromSlime(object.field(networkPortsKey)))); } private ApplicationId applicationIdFromSlime(Inspector object) { return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()), ApplicationName.from(object.field(applicationIdKey).asString()), InstanceName.from(object.field(instanceIdKey).asString())); } private History historyFromSlime(Inspector array) { List<History.Event> events = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> { History.Event event = eventFromSlime(item); if (event != null) events.add(event); }); return new History(events); } private History.Event eventFromSlime(Inspector object) { History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString()); if (type == null) return null; Instant at = Instant.ofEpochMilli(object.field(atKey).asLong()); Agent agent = eventAgentFromSlime(object.field(agentKey)); return new History.Event(type, agent, at); } private Generation generationFromSlime(Inspector object, String wantedField, String currentField) { Inspector current = object.field(currentField); return new Generation(object.field(wantedField).asLong(), current.asLong()); } private ClusterMembership clusterMembershipFromSlime(Inspector object) { return ClusterMembership.from(object.field(serviceIdKey).asString(), versionFromSlime(object.field(wantedVespaVersionKey)).get(), containerImageRepoFromSlime(object.field(wantedContainerImageRepoKey))); } private Optional<Version> versionFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(Version.fromString(object.asString())); } private Optional<DockerImage> containerImageRepoFromSlime(Inspector object) { if ( ! object.valid() || object.asString().isEmpty()) return Optional.empty(); return Optional.of(DockerImage.fromString(object.asString())); } private Optional<DockerImage> containerImageFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(DockerImage.fromString(object.asString())); } private Optional<Instant> instantFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(Instant.ofEpochMilli(object.asLong())); } private Optional<String> parentHostnameFromSlime(Inspector object) { if (object.field(parentHostnameKey).valid()) return Optional.of(object.field(parentHostnameKey).asString()); else return Optional.empty(); } private Set<String> ipAddressesFromSlime(Inspector object, String key) { ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder(); object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); return ipAddresses.build(); } private Optional<String> modelNameFromSlime(Inspector object) { if (object.field(modelNameKey).valid()) { return Optional.of(object.field(modelNameKey).asString()); } return Optional.empty(); } private Optional<TenantName> reservedToFromSlime(Inspector object) { if (! object.valid()) return Optional.empty(); if (object.type() != Type.STRING) throw new IllegalArgumentException("Expected 'reservedTo' to be a string but is " + object); return Optional.of(TenantName.from(object.asString())); } private Optional<ApplicationId> exclusiveToFromSlime(Inspector object) { if (! 
object.valid()) return Optional.empty(); if (object.type() != Type.STRING) throw new IllegalArgumentException("Expected 'exclusiveTo' to be a string but is " + object); return Optional.of(ApplicationId.fromSerializedForm(object.asString())); } /** Returns the event type, or null if this event type should be ignored */ private History.Event.Type eventTypeFromString(String eventTypeString) { switch (eventTypeString) { case "provisioned" : return History.Event.Type.provisioned; case "deprovisioned" : return History.Event.Type.deprovisioned; case "readied" : return History.Event.Type.readied; case "reserved" : return History.Event.Type.reserved; case "activated" : return History.Event.Type.activated; case "wantToRetire": return History.Event.Type.wantToRetire; case "retired" : return History.Event.Type.retired; case "deactivated" : return History.Event.Type.deactivated; case "parked" : return History.Event.Type.parked; case "failed" : return History.Event.Type.failed; case "deallocated" : return History.Event.Type.deallocated; case "down" : return History.Event.Type.down; case "requested" : return History.Event.Type.requested; case "rebooted" : return History.Event.Type.rebooted; case "osUpgraded" : return History.Event.Type.osUpgraded; case "firmwareVerified" : return History.Event.Type.firmwareVerified; case "breakfixed" : return History.Event.Type.breakfixed; } throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'"); } private String toString(History.Event.Type nodeEventType) { switch (nodeEventType) { case provisioned : return "provisioned"; case deprovisioned : return "deprovisioned"; case readied : return "readied"; case reserved : return "reserved"; case activated : return "activated"; case wantToRetire: return "wantToRetire"; case retired : return "retired"; case deactivated : return "deactivated"; case parked : return "parked"; case failed : return "failed"; case deallocated : return "deallocated"; case down : return "down"; 
case requested: return "requested"; case rebooted: return "rebooted"; case osUpgraded: return "osUpgraded"; case firmwareVerified: return "firmwareVerified"; case breakfixed: return "breakfixed"; } throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined"); } private Agent eventAgentFromSlime(Inspector eventAgentField) { switch (eventAgentField.asString()) { case "operator" : return Agent.operator; case "application" : return Agent.application; case "system" : return Agent.system; case "DirtyExpirer" : return Agent.DirtyExpirer; case "DynamicProvisioningMaintainer" : return Agent.DynamicProvisioningMaintainer; case "FailedExpirer" : return Agent.FailedExpirer; case "InactiveExpirer" : return Agent.InactiveExpirer; case "NodeFailer" : return Agent.NodeFailer; case "NodeHealthTracker" : return Agent.NodeHealthTracker; case "ProvisionedExpirer" : return Agent.ProvisionedExpirer; case "Rebalancer" : return Agent.Rebalancer; case "ReservationExpirer" : return Agent.ReservationExpirer; case "RetiringUpgrader" : return Agent.RetiringUpgrader; case "SpareCapacityMaintainer": return Agent.SpareCapacityMaintainer; case "SwitchRebalancer": return Agent.SwitchRebalancer; } throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'"); } private String toString(Agent agent) { switch (agent) { case operator : return "operator"; case application : return "application"; case system : return "system"; case DirtyExpirer : return "DirtyExpirer"; case DynamicProvisioningMaintainer : return "DynamicProvisioningMaintainer"; case FailedExpirer : return "FailedExpirer"; case InactiveExpirer : return "InactiveExpirer"; case NodeFailer : return "NodeFailer"; case NodeHealthTracker: return "NodeHealthTracker"; case ProvisionedExpirer : return "ProvisionedExpirer"; case Rebalancer : return "Rebalancer"; case ReservationExpirer : return "ReservationExpirer"; case RetiringUpgrader: return "RetiringUpgrader"; case 
SpareCapacityMaintainer: return "SpareCapacityMaintainer"; case SwitchRebalancer: return "SwitchRebalancer"; } throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined"); } static NodeType nodeTypeFromString(String typeString) { switch (typeString) { case "tenant": return NodeType.tenant; case "host": return NodeType.host; case "proxy": return NodeType.proxy; case "proxyhost": return NodeType.proxyhost; case "config": return NodeType.config; case "confighost": return NodeType.confighost; case "controller": return NodeType.controller; case "controllerhost": return NodeType.controllerhost; case "devhost": return NodeType.devhost; default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'"); } } static String toString(NodeType type) { switch (type) { case tenant: return "tenant"; case host: return "host"; case proxy: return "proxy"; case proxyhost: return "proxyhost"; case config: return "config"; case confighost: return "confighost"; case controller: return "controller"; case controllerhost: return "controllerhost"; case devhost: return "devhost"; } throw new IllegalArgumentException("Serialized form of '" + type + "' not defined"); } }
Consider having "percent" removed from metric name, and instead represent it as a value between 0 and 1.
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { metric.set("reindexing.percent.done", status.progress().map(ProgressToken::percentFinished).orElse(status.state() == SUCCESSFUL ? 100.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(status.state())))); }); }
metric.set("reindexing.percent.done",
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { metric.set("reindexing.progress", status.progress().map(ProgressToken::percentFinished).map(percentage -> percentage * 1e-2) .orElse(status.state() == SUCCESSFUL ? 1.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(status.state())))); }); }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "ready"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "pending"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
Also, we should discuss the metric names in an architect meeting at some time.
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { metric.set("reindexing.percent.done", status.progress().map(ProgressToken::percentFinished).orElse(status.state() == SUCCESSFUL ? 100.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(status.state())))); }); }
metric.set("reindexing.percent.done",
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { metric.set("reindexing.progress", status.progress().map(ProgressToken::percentFinished).map(percentage -> percentage * 1e-2) .orElse(status.state() == SUCCESSFUL ? 1.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(status.state())))); }); }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "ready"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "pending"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
Yes, that’s the intention :) Feel free to reach out early with suggestions.
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { metric.set("reindexing.percent.done", status.progress().map(ProgressToken::percentFinished).orElse(status.state() == SUCCESSFUL ? 100.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(status.state())))); }); }
metric.set("reindexing.percent.done",
void dump(Reindexing reindexing) { reindexing.status().forEach((type, status) -> { metric.set("reindexing.progress", status.progress().map(ProgressToken::percentFinished).map(percentage -> percentage * 1e-2) .orElse(status.state() == SUCCESSFUL ? 1.0 : 0.0), metric.createContext(Map.of("clusterid", cluster, "documenttype", type.getName(), "state", toString(status.state())))); }); }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "ready"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
class ReindexingMetrics { private final Metric metric; private final String cluster; ReindexingMetrics(Metric metric, String cluster) { this.metric = metric; this.cluster = cluster; } private static String toString(Reindexing.State state) { switch (state) { case READY: return "pending"; case RUNNING: return "running"; case FAILED: return "failed"; case SUCCESSFUL: return "successful"; default: throw new IllegalArgumentException("Unknown reindexing state '" + state + "'"); } } }
Why still call set method here?
public List<OptExpression> transform(OptExpression input, OptimizerContext context) { LogicalScanOperator scanOperator = (LogicalScanOperator) input.getOp(); ColumnRefSet requiredOutputColumns = context.getTaskContext().get(0).getRequiredColumns(); Set<ColumnRefOperator> outputColumns = scanOperator.getColRefToColumnMetaMap().keySet().stream().filter(requiredOutputColumns::contains) .collect(Collectors.toSet()); outputColumns.addAll(Utils.extractColumnRef(scanOperator.getPredicate())); if (outputColumns.size() == 0) { List<ColumnRefOperator> columnRefOperatorList = new ArrayList<>(scanOperator.getColRefToColumnMetaMap().keySet()); int smallestIndex = -1; int smallestColumnLength = Integer.MAX_VALUE; for (int index = 0; index < columnRefOperatorList.size(); ++index) { if (smallestIndex == -1) { smallestIndex = index; } Type columnType = columnRefOperatorList.get(index).getType(); if (columnType.isScalarType()) { int columnLength = columnType.getSlotSize(); if (columnLength < smallestColumnLength) { smallestIndex = index; smallestColumnLength = columnLength; } } } Preconditions.checkArgument(smallestIndex != -1); outputColumns.add(columnRefOperatorList.get(smallestIndex)); } if (scanOperator.getColRefToColumnMetaMap().keySet().equals(outputColumns)) { return Collections.emptyList(); } else { Map<ColumnRefOperator, Column> newColumnRefMap = outputColumns.stream() .collect(Collectors.toMap(identity(), scanOperator.getColRefToColumnMetaMap()::get)); if (scanOperator instanceof LogicalOlapScanOperator) { LogicalOlapScanOperator olapScanOperator = (LogicalOlapScanOperator) scanOperator; LogicalOlapScanOperator newScanOperator = new LogicalOlapScanOperator( ((LogicalOlapScanOperator) scanOperator).getOlapTable(), new ArrayList<>(outputColumns), newColumnRefMap, scanOperator.getColumnMetaToColRefMap(), ((LogicalOlapScanOperator) scanOperator).getDistributionSpec(), scanOperator.getLimit(), scanOperator.getPredicate()); 
newScanOperator.setSelectedIndexId(olapScanOperator.getSelectedIndexId()); newScanOperator.setSelectedPartitionId(olapScanOperator.getSelectedPartitionId()); newScanOperator.setSelectedTabletId(Lists.newArrayList(olapScanOperator.getSelectedTabletId())); newScanOperator.setPartitionNames(olapScanOperator.getPartitionNames()); newScanOperator.setHintsTabletIds(olapScanOperator.getHintsTabletIds()); return Lists.newArrayList(new OptExpression(newScanOperator)); } else { try { Class<? extends LogicalScanOperator> classType = scanOperator.getClass(); LogicalScanOperator newScanOperator = classType.getConstructor(Table.class, List.class, Map.class, Map.class, long.class, ScalarOperator.class).newInstance( scanOperator.getTable(), new ArrayList<>(outputColumns), newColumnRefMap, scanOperator.getColumnMetaToColRefMap(), scanOperator.getLimit(), scanOperator.getPredicate()); return Lists.newArrayList(new OptExpression(newScanOperator)); } catch (Exception e) { throw new StarRocksPlannerException(e.getMessage(), ErrorType.INTERNAL_ERROR); } } } }
newScanOperator.setSelectedIndexId(olapScanOperator.getSelectedIndexId());
public List<OptExpression> transform(OptExpression input, OptimizerContext context) { LogicalScanOperator scanOperator = (LogicalScanOperator) input.getOp(); ColumnRefSet requiredOutputColumns = context.getTaskContext().get(0).getRequiredColumns(); Set<ColumnRefOperator> outputColumns = scanOperator.getColRefToColumnMetaMap().keySet().stream().filter(requiredOutputColumns::contains) .collect(Collectors.toSet()); outputColumns.addAll(Utils.extractColumnRef(scanOperator.getPredicate())); if (outputColumns.size() == 0) { List<ColumnRefOperator> columnRefOperatorList = new ArrayList<>(scanOperator.getColRefToColumnMetaMap().keySet()); int smallestIndex = -1; int smallestColumnLength = Integer.MAX_VALUE; for (int index = 0; index < columnRefOperatorList.size(); ++index) { if (smallestIndex == -1) { smallestIndex = index; } Type columnType = columnRefOperatorList.get(index).getType(); if (columnType.isScalarType()) { int columnLength = columnType.getSlotSize(); if (columnLength < smallestColumnLength) { smallestIndex = index; smallestColumnLength = columnLength; } } } Preconditions.checkArgument(smallestIndex != -1); outputColumns.add(columnRefOperatorList.get(smallestIndex)); } if (scanOperator.getColRefToColumnMetaMap().keySet().equals(outputColumns)) { return Collections.emptyList(); } else { Map<ColumnRefOperator, Column> newColumnRefMap = outputColumns.stream() .collect(Collectors.toMap(identity(), scanOperator.getColRefToColumnMetaMap()::get)); if (scanOperator instanceof LogicalOlapScanOperator) { LogicalOlapScanOperator olapScanOperator = (LogicalOlapScanOperator) scanOperator; LogicalOlapScanOperator newScanOperator = new LogicalOlapScanOperator( olapScanOperator.getTable(), new ArrayList<>(outputColumns), newColumnRefMap, olapScanOperator.getColumnMetaToColRefMap(), olapScanOperator.getDistributionSpec(), olapScanOperator.getLimit(), olapScanOperator.getPredicate(), olapScanOperator.getSelectedIndexId(), olapScanOperator.getSelectedPartitionId(), 
olapScanOperator.getPartitionNames(), olapScanOperator.getSelectedTabletId(), olapScanOperator.getHintsTabletIds()); return Lists.newArrayList(new OptExpression(newScanOperator)); } else { try { Class<? extends LogicalScanOperator> classType = scanOperator.getClass(); LogicalScanOperator newScanOperator = classType.getConstructor(Table.class, List.class, Map.class, Map.class, long.class, ScalarOperator.class).newInstance( scanOperator.getTable(), new ArrayList<>(outputColumns), newColumnRefMap, scanOperator.getColumnMetaToColRefMap(), scanOperator.getLimit(), scanOperator.getPredicate()); return Lists.newArrayList(new OptExpression(newScanOperator)); } catch (Exception e) { throw new StarRocksPlannerException(e.getMessage(), ErrorType.INTERNAL_ERROR); } } } }
class PruneScanColumnRule extends TransformationRule { public static final PruneScanColumnRule OLAP_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_OLAP_SCAN); public static final PruneScanColumnRule SCHEMA_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_SCHEMA_SCAN); public static final PruneScanColumnRule MYSQL_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_MYSQL_SCAN); public static final PruneScanColumnRule ES_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_ES_SCAN); public PruneScanColumnRule(OperatorType logicalOperatorType) { super(RuleType.TF_PRUNE_OLAP_SCAN_COLUMNS, Pattern.create(logicalOperatorType)); } @Override }
class PruneScanColumnRule extends TransformationRule { public static final PruneScanColumnRule OLAP_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_OLAP_SCAN); public static final PruneScanColumnRule SCHEMA_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_SCHEMA_SCAN); public static final PruneScanColumnRule MYSQL_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_MYSQL_SCAN); public static final PruneScanColumnRule ES_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_ES_SCAN); public PruneScanColumnRule(OperatorType logicalOperatorType) { super(RuleType.TF_PRUNE_OLAP_SCAN_COLUMNS, Pattern.create(logicalOperatorType)); } @Override }
I think this check should be down in validation somewhere, not in ApplicationRepository? And isn't there a validate switch you should take into account?
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed()))
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
The actions are generated based on the validate switch down in validation. This is a higher-level evaluation. It could be in `Deployment.activate()`, but I think this is clearer, because that only conditionally runs `Deployment.prepare()`, which is the thing that sets these config change actions.
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed()))
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
This is just moving the logic that was previously in the controller (fail deployment if prepare reveals disallowed config change actions) closer to the action, so it actually works. Currently these deployments do happen, but the controller claims they didn't.
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed()))
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
Ok, I believe it was worse before. Even so, it could be that we can make it better than this. [New!] Why is this warning and otherwise silently ignoring prepare? Prepare should either succeed or throw? And why can't we do that down in the validation code like we do with everything else? And should this really disregard the "validate" argument?
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed()))
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
Let's plug this hole now, and then do it properly. It may be some work.
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed()))
private PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, DeployHandlerLogger logger) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Deployment deployment = prepare(sessionId, prepareParams, logger); if (deployment.configChangeActions().getRefeedActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-feed actions"); else if (deployment.configChangeActions().getReindexActions().getEntries().stream().anyMatch(entry -> ! entry.allowed())) logger.log(Level.WARNING, "Activation rejected because of disallowed re-index actions"); else deployment.activate(); return new PrepareResult(sessionId, deployment.configChangeActions(), logger); }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
should keep the comment
public List<OptExpression> transform(OptExpression input, OptimizerContext context) { LogicalScanOperator scanOperator = (LogicalScanOperator) input.getOp(); ColumnRefSet requiredOutputColumns = context.getTaskContext().get(0).getRequiredColumns(); Set<ColumnRefOperator> outputColumns = scanOperator.getColRefToColumnMetaMap().keySet().stream().filter(requiredOutputColumns::contains) .collect(Collectors.toSet()); outputColumns.addAll(Utils.extractColumnRef(scanOperator.getPredicate())); if (outputColumns.size() == 0) { List<ColumnRefOperator> columnRefOperatorList = new ArrayList<>(scanOperator.getColRefToColumnMetaMap().keySet()); int smallestIndex = -1; int smallestColumnLength = Integer.MAX_VALUE; for (int index = 0; index < columnRefOperatorList.size(); ++index) { if (smallestIndex == -1) { smallestIndex = index; } Type columnType = columnRefOperatorList.get(index).getType(); if (columnType.isScalarType()) { int columnLength = columnType.getSlotSize(); if (columnLength < smallestColumnLength) { smallestIndex = index; smallestColumnLength = columnLength; } } } Preconditions.checkArgument(smallestIndex != -1); outputColumns.add(columnRefOperatorList.get(smallestIndex)); } if (scanOperator.getColRefToColumnMetaMap().keySet().equals(outputColumns)) { return Collections.emptyList(); } else { Map<ColumnRefOperator, Column> newColumnRefMap = outputColumns.stream() .collect(Collectors.toMap(identity(), scanOperator.getColRefToColumnMetaMap()::get)); if (scanOperator instanceof LogicalOlapScanOperator) { LogicalOlapScanOperator olapScanOperator = (LogicalOlapScanOperator) scanOperator; LogicalOlapScanOperator newScanOperator = new LogicalOlapScanOperator( ((LogicalOlapScanOperator) scanOperator).getOlapTable(), new ArrayList<>(outputColumns), newColumnRefMap, scanOperator.getColumnMetaToColRefMap(), ((LogicalOlapScanOperator) scanOperator).getDistributionSpec(), scanOperator.getLimit(), scanOperator.getPredicate()); 
newScanOperator.setSelectedIndexId(olapScanOperator.getSelectedIndexId()); newScanOperator.setSelectedPartitionId(olapScanOperator.getSelectedPartitionId()); newScanOperator.setSelectedTabletId(Lists.newArrayList(olapScanOperator.getSelectedTabletId())); newScanOperator.setPartitionNames(olapScanOperator.getPartitionNames()); newScanOperator.setHintsTabletIds(olapScanOperator.getHintsTabletIds()); return Lists.newArrayList(new OptExpression(newScanOperator)); } else { try { Class<? extends LogicalScanOperator> classType = scanOperator.getClass(); LogicalScanOperator newScanOperator = classType.getConstructor(Table.class, List.class, Map.class, Map.class, long.class, ScalarOperator.class).newInstance( scanOperator.getTable(), new ArrayList<>(outputColumns), newColumnRefMap, scanOperator.getColumnMetaToColRefMap(), scanOperator.getLimit(), scanOperator.getPredicate()); return Lists.newArrayList(new OptExpression(newScanOperator)); } catch (Exception e) { throw new StarRocksPlannerException(e.getMessage(), ErrorType.INTERNAL_ERROR); } } } }
Set<ColumnRefOperator> outputColumns =
public List<OptExpression> transform(OptExpression input, OptimizerContext context) { LogicalScanOperator scanOperator = (LogicalScanOperator) input.getOp(); ColumnRefSet requiredOutputColumns = context.getTaskContext().get(0).getRequiredColumns(); Set<ColumnRefOperator> outputColumns = scanOperator.getColRefToColumnMetaMap().keySet().stream().filter(requiredOutputColumns::contains) .collect(Collectors.toSet()); outputColumns.addAll(Utils.extractColumnRef(scanOperator.getPredicate())); if (outputColumns.size() == 0) { List<ColumnRefOperator> columnRefOperatorList = new ArrayList<>(scanOperator.getColRefToColumnMetaMap().keySet()); int smallestIndex = -1; int smallestColumnLength = Integer.MAX_VALUE; for (int index = 0; index < columnRefOperatorList.size(); ++index) { if (smallestIndex == -1) { smallestIndex = index; } Type columnType = columnRefOperatorList.get(index).getType(); if (columnType.isScalarType()) { int columnLength = columnType.getSlotSize(); if (columnLength < smallestColumnLength) { smallestIndex = index; smallestColumnLength = columnLength; } } } Preconditions.checkArgument(smallestIndex != -1); outputColumns.add(columnRefOperatorList.get(smallestIndex)); } if (scanOperator.getColRefToColumnMetaMap().keySet().equals(outputColumns)) { return Collections.emptyList(); } else { Map<ColumnRefOperator, Column> newColumnRefMap = outputColumns.stream() .collect(Collectors.toMap(identity(), scanOperator.getColRefToColumnMetaMap()::get)); if (scanOperator instanceof LogicalOlapScanOperator) { LogicalOlapScanOperator olapScanOperator = (LogicalOlapScanOperator) scanOperator; LogicalOlapScanOperator newScanOperator = new LogicalOlapScanOperator( olapScanOperator.getTable(), new ArrayList<>(outputColumns), newColumnRefMap, olapScanOperator.getColumnMetaToColRefMap(), olapScanOperator.getDistributionSpec(), olapScanOperator.getLimit(), olapScanOperator.getPredicate(), olapScanOperator.getSelectedIndexId(), olapScanOperator.getSelectedPartitionId(), 
olapScanOperator.getPartitionNames(), olapScanOperator.getSelectedTabletId(), olapScanOperator.getHintsTabletIds()); return Lists.newArrayList(new OptExpression(newScanOperator)); } else { try { Class<? extends LogicalScanOperator> classType = scanOperator.getClass(); LogicalScanOperator newScanOperator = classType.getConstructor(Table.class, List.class, Map.class, Map.class, long.class, ScalarOperator.class).newInstance( scanOperator.getTable(), new ArrayList<>(outputColumns), newColumnRefMap, scanOperator.getColumnMetaToColRefMap(), scanOperator.getLimit(), scanOperator.getPredicate()); return Lists.newArrayList(new OptExpression(newScanOperator)); } catch (Exception e) { throw new StarRocksPlannerException(e.getMessage(), ErrorType.INTERNAL_ERROR); } } } }
class PruneScanColumnRule extends TransformationRule { public static final PruneScanColumnRule OLAP_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_OLAP_SCAN); public static final PruneScanColumnRule SCHEMA_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_SCHEMA_SCAN); public static final PruneScanColumnRule MYSQL_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_MYSQL_SCAN); public static final PruneScanColumnRule ES_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_ES_SCAN); public PruneScanColumnRule(OperatorType logicalOperatorType) { super(RuleType.TF_PRUNE_OLAP_SCAN_COLUMNS, Pattern.create(logicalOperatorType)); } @Override }
class PruneScanColumnRule extends TransformationRule { public static final PruneScanColumnRule OLAP_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_OLAP_SCAN); public static final PruneScanColumnRule SCHEMA_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_SCHEMA_SCAN); public static final PruneScanColumnRule MYSQL_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_MYSQL_SCAN); public static final PruneScanColumnRule ES_SCAN = new PruneScanColumnRule(OperatorType.LOGICAL_ES_SCAN); public PruneScanColumnRule(OperatorType logicalOperatorType) { super(RuleType.TF_PRUNE_OLAP_SCAN_COLUMNS, Pattern.create(logicalOperatorType)); } @Override }
I think this second test is redundant. You can remove it.
public void changing_tensor_type_of_tensor_field_requires_refeed() throws Exception { new Fixture( "field f1 type tensor(x[2]) { indexing: attribute }", "field f1 type tensor(x[3]) { indexing: attribute }") .assertValidation(newRefeedAction(ClusterSpec.Id.from("test"), "field-type-change", ValidationOverrides.empty, "Field 'f1' changed: data type: 'tensor(x[2])' -> 'tensor(x[3])'", Instant.now())); new Fixture( "field f1 type tensor(x[5]) { indexing: attribute }", "field f1 type tensor(x[3]) { indexing: attribute }") .assertValidation(newRefeedAction(ClusterSpec.Id.from("test"), "field-type-change", ValidationOverrides.empty, "Field 'f1' changed: data type: 'tensor(x[5])' -> 'tensor(x[3])'", Instant.now())); }
"field f1 type tensor(x[5]) { indexing: attribute }",
public void changing_tensor_type_of_tensor_field_requires_refeed() throws Exception { new Fixture( "field f1 type tensor(x[2]) { indexing: attribute }", "field f1 type tensor(x[3]) { indexing: attribute }") .assertValidation(newRefeedAction(ClusterSpec.Id.from("test"), "field-type-change", ValidationOverrides.empty, "Field 'f1' changed: data type: 'tensor(x[2])' -> 'tensor(x[3])'", Instant.now())); new Fixture( "field f1 type tensor(x[5]) { indexing: attribute }", "field f1 type tensor(x[3]) { indexing: attribute }") .assertValidation(newRefeedAction(ClusterSpec.Id.from("test"), "field-type-change", ValidationOverrides.empty, "Field 'f1' changed: data type: 'tensor(x[5])' -> 'tensor(x[3])'", Instant.now())); }
class Fixture extends ContentClusterFixture { DocumentTypeChangeValidator validator; public Fixture(String currentSd, String nextSd) throws Exception { super(currentSd, nextSd); validator = new DocumentTypeChangeValidator(ClusterSpec.Id.from("test"), currentDocType(), nextDocType()); } @Override public List<VespaConfigChangeAction> validate() { return validator.validate(ValidationOverrides.empty, Instant.now()); } }
class Fixture extends ContentClusterFixture { DocumentTypeChangeValidator validator; public Fixture(String currentSd, String nextSd) throws Exception { super(currentSd, nextSd); validator = new DocumentTypeChangeValidator(ClusterSpec.Id.from("test"), currentDocType(), nextDocType()); } @Override public List<VespaConfigChangeAction> validate() { return validator.validate(ValidationOverrides.empty, Instant.now()); } }
Consider including the iteration in the message, to make it clearer for the reader of the message it is actually running more often
protected boolean maintain() { if (iteration % 10 == 0) log.log(LogLevel.INFO, () -> "Running " + SessionsMaintainer.class.getSimpleName()); applicationRepository.deleteExpiredLocalSessions(); if (hostedVespa) { Duration expiryTime = Duration.ofMinutes(90); int deleted = applicationRepository.deleteExpiredRemoteSessions(expiryTime); log.log(LogLevel.FINE, () -> "Deleted " + deleted + " expired remote sessions older than " + expiryTime); } iteration++; return true; }
log.log(LogLevel.INFO, () -> "Running " + SessionsMaintainer.class.getSimpleName());
protected boolean maintain() { if (iteration % 10 == 0) log.log(LogLevel.INFO, () -> "Running " + SessionsMaintainer.class.getSimpleName() + ", iteration " + iteration); applicationRepository.deleteExpiredLocalSessions(); if (hostedVespa) { Duration expiryTime = Duration.ofMinutes(90); int deleted = applicationRepository.deleteExpiredRemoteSessions(expiryTime); log.log(LogLevel.FINE, () -> "Deleted " + deleted + " expired remote sessions older than " + expiryTime); } iteration++; return true; }
class SessionsMaintainer extends ConfigServerMaintainer { private final boolean hostedVespa; private int iteration = 0; SessionsMaintainer(ApplicationRepository applicationRepository, Curator curator, Duration interval, FlagSource flagSource) { super(applicationRepository, curator, flagSource, Duration.ofMinutes(1), interval); this.hostedVespa = applicationRepository.configserverConfig().hostedVespa(); } @Override }
class SessionsMaintainer extends ConfigServerMaintainer { private final boolean hostedVespa; private int iteration = 0; SessionsMaintainer(ApplicationRepository applicationRepository, Curator curator, Duration interval, FlagSource flagSource) { super(applicationRepository, curator, flagSource, Duration.ofMinutes(1), interval); this.hostedVespa = applicationRepository.configserverConfig().hostedVespa(); } @Override }
Good point, done
protected boolean maintain() { if (iteration % 10 == 0) log.log(LogLevel.INFO, () -> "Running " + SessionsMaintainer.class.getSimpleName()); applicationRepository.deleteExpiredLocalSessions(); if (hostedVespa) { Duration expiryTime = Duration.ofMinutes(90); int deleted = applicationRepository.deleteExpiredRemoteSessions(expiryTime); log.log(LogLevel.FINE, () -> "Deleted " + deleted + " expired remote sessions older than " + expiryTime); } iteration++; return true; }
log.log(LogLevel.INFO, () -> "Running " + SessionsMaintainer.class.getSimpleName());
protected boolean maintain() { if (iteration % 10 == 0) log.log(LogLevel.INFO, () -> "Running " + SessionsMaintainer.class.getSimpleName() + ", iteration " + iteration); applicationRepository.deleteExpiredLocalSessions(); if (hostedVespa) { Duration expiryTime = Duration.ofMinutes(90); int deleted = applicationRepository.deleteExpiredRemoteSessions(expiryTime); log.log(LogLevel.FINE, () -> "Deleted " + deleted + " expired remote sessions older than " + expiryTime); } iteration++; return true; }
class SessionsMaintainer extends ConfigServerMaintainer { private final boolean hostedVespa; private int iteration = 0; SessionsMaintainer(ApplicationRepository applicationRepository, Curator curator, Duration interval, FlagSource flagSource) { super(applicationRepository, curator, flagSource, Duration.ofMinutes(1), interval); this.hostedVespa = applicationRepository.configserverConfig().hostedVespa(); } @Override }
class SessionsMaintainer extends ConfigServerMaintainer { private final boolean hostedVespa; private int iteration = 0; SessionsMaintainer(ApplicationRepository applicationRepository, Curator curator, Duration interval, FlagSource flagSource) { super(applicationRepository, curator, flagSource, Duration.ofMinutes(1), interval); this.hostedVespa = applicationRepository.configserverConfig().hostedVespa(); } @Override }
byCluster?
private void updateAllocationMetrics(NodeList nodes) { Map<ClusterKey, List<Node>> byApplication = nodes.stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> new ClusterKey(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byApplication.forEach((clusterKey, allocatedNodes) -> { int activeNodes = 0; int nonActiveNodes = 0; for (var node : allocatedNodes) { if (node.state() == State.active) { activeNodes++; } else { nonActiveNodes++; } } double nonActiveFraction; if (activeNodes == 0) { nonActiveFraction = 1; } else { nonActiveFraction = (double) nonActiveNodes / (double) activeNodes; } Map<String, String> dimensions = new HashMap<>(dimensions(clusterKey.application)); dimensions.put("clusterId", clusterKey.cluster.value()); metric.set("nodes.active", activeNodes, getContext(dimensions)); metric.set("nodes.nonActive", nonActiveNodes, getContext(dimensions)); metric.set("nodes.nonActiveFraction", nonActiveFraction, getContext(dimensions)); }); }
Map<ClusterKey, List<Node>> byApplication = nodes.stream()
private void updateAllocationMetrics(NodeList nodes) { Map<ClusterKey, List<Node>> byCluster = nodes.stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> new ClusterKey(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterKey, allocatedNodes) -> { int activeNodes = 0; int nonActiveNodes = 0; for (var node : allocatedNodes) { if (node.state() == State.active) { activeNodes++; } else { nonActiveNodes++; } } double nonActiveFraction; if (activeNodes == 0) { nonActiveFraction = 1; } else { nonActiveFraction = (double) nonActiveNodes / (double) activeNodes; } Map<String, String> dimensions = new HashMap<>(dimensions(clusterKey.application)); dimensions.put("clusterId", clusterKey.cluster.value()); metric.set("nodes.active", activeNodes, getContext(dimensions)); metric.set("nodes.nonActive", nonActiveNodes, getContext(dimensions)); metric.set("nodes.nonActiveFraction", nonActiveFraction, getContext(dimensions)); }); }
class MetricsReporter extends NodeRepositoryMaintainer { private final Metric metric; private final Orchestrator orchestrator; private final ServiceMonitor serviceMonitor; private final Map<Map<String, String>, Metric.Context> contextMap = new HashMap<>(); private final Supplier<Integer> pendingRedeploymentsSupplier; MetricsReporter(NodeRepository nodeRepository, Metric metric, Orchestrator orchestrator, ServiceMonitor serviceMonitor, Supplier<Integer> pendingRedeploymentsSupplier, Duration interval) { super(nodeRepository, interval, metric); this.metric = metric; this.orchestrator = orchestrator; this.serviceMonitor = serviceMonitor; this.pendingRedeploymentsSupplier = pendingRedeploymentsSupplier; } @Override public boolean maintain() { NodeList nodes = nodeRepository().list(); ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot(); updateZoneMetrics(); updateCacheMetrics(); updateMaintenanceMetrics(); nodes.forEach(node -> updateNodeMetrics(node, serviceModel)); updateNodeCountMetrics(nodes); updateLockMetrics(); updateDockerMetrics(nodes); updateTenantUsageMetrics(nodes); updateAllocationMetrics(nodes); return true; } private void updateZoneMetrics() { metric.set("zone.working", nodeRepository().isWorking() ? 
1 : 0, null); } private void updateCacheMetrics() { CacheStats nodeCacheStats = nodeRepository().database().nodeSerializerCacheStats(); metric.set("cache.nodeObject.hitRate", nodeCacheStats.hitRate(), null); metric.set("cache.nodeObject.evictionCount", nodeCacheStats.evictionCount(), null); metric.set("cache.nodeObject.size", nodeCacheStats.size(), null); CacheStats curatorCacheStats = nodeRepository().database().cacheStats(); metric.set("cache.curator.hitRate", curatorCacheStats.hitRate(), null); metric.set("cache.curator.evictionCount", curatorCacheStats.evictionCount(), null); metric.set("cache.curator.size", curatorCacheStats.size(), null); } private void updateMaintenanceMetrics() { metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null); } private void updateNodeMetrics(Node node, ServiceModel serviceModel) { Metric.Context context; Optional<Allocation> allocation = node.allocation(); if (allocation.isPresent()) { ApplicationId applicationId = allocation.get().owner(); Map<String, String> dimensions = new HashMap<>(dimensions(applicationId)); dimensions.put("state", node.state().name()); dimensions.put("host", node.hostname()); dimensions.put("clustertype", allocation.get().membership().cluster().type().name()); dimensions.put("clusterid", allocation.get().membership().cluster().id().value()); context = getContext(dimensions); long wantedRestartGeneration = allocation.get().restartGeneration().wanted(); metric.set("wantedRestartGeneration", wantedRestartGeneration, context); long currentRestartGeneration = allocation.get().restartGeneration().current(); metric.set("currentRestartGeneration", currentRestartGeneration, context); boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration; metric.set("wantToRestart", wantToRestart ? 1 : 0, context); metric.set("retired", allocation.get().membership().retired() ? 
1 : 0, context); Version wantedVersion = allocation.get().membership().cluster().vespaVersion(); double wantedVersionNumber = getVersionAsNumber(wantedVersion); metric.set("wantedVespaVersion", wantedVersionNumber, context); Optional<Version> currentVersion = node.status().vespaVersion(); boolean converged = currentVersion.isPresent() && currentVersion.get().equals(wantedVersion); metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context); } else { context = getContext(Map.of("state", node.state().name(), "host", node.hostname())); } Optional<Version> currentVersion = node.status().vespaVersion(); if (currentVersion.isPresent()) { double currentVersionNumber = getVersionAsNumber(currentVersion.get()); metric.set("currentVespaVersion", currentVersionNumber, context); } long wantedRebootGeneration = node.status().reboot().wanted(); metric.set("wantedRebootGeneration", wantedRebootGeneration, context); long currentRebootGeneration = node.status().reboot().current(); metric.set("currentRebootGeneration", currentRebootGeneration, context); boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration; metric.set("wantToReboot", wantToReboot ? 1 : 0, context); metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context); metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context); metric.set("failReport", NodeFailer.reasonsToFailParentHost(node).isEmpty() ? 0 : 1, context); HostName hostname = new HostName(node.hostname()); serviceModel.getApplication(hostname) .map(ApplicationInstance::reference) .map(reference -> orchestrator.getHostInfo(reference, hostname)) .ifPresent(info -> { int suspended = info.status().isSuspended() ? 
1 : 0; metric.set("suspended", suspended, context); metric.set("allowedToBeDown", suspended, context); long suspendedSeconds = info.suspendedSince() .map(suspendedSince -> Duration.between(suspendedSince, clock().instant()).getSeconds()) .orElse(0L); metric.set("suspendedSeconds", suspendedSeconds, context); }); long numberOfServices; List<ServiceInstance> services = serviceModel.getServiceInstancesByHostName().get(hostname); if (services == null) { numberOfServices = 0; } else { Map<ServiceStatus, Long> servicesCount = services.stream().collect( Collectors.groupingBy(ServiceInstance::serviceStatus, Collectors.counting())); numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum(); metric.set( "numberOfServicesUp", servicesCount.getOrDefault(ServiceStatus.UP, 0L), context); metric.set( "numberOfServicesNotChecked", servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L), context); long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L); metric.set("numberOfServicesDown", numberOfServicesDown, context); metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context); boolean down = NodeHealthTracker.allDown(services); metric.set("nodeFailerBadNode", (down ? 1 : 0), context); boolean nodeDownInNodeRepo = node.history().event(History.Event.Type.down).isPresent(); metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context); } metric.set("numberOfServices", numberOfServices, context); } private static String toApp(ApplicationId applicationId) { return applicationId.application().value() + "." + applicationId.instance().value(); } /** * A version 6.163.20 will be returned as a number 163.020. The major * version can normally be inferred. As long as the micro version stays * below 1000 these numbers sort like Version. 
*/ private static double getVersionAsNumber(Version version) { return version.getMinor() + version.getMicro() / 1000.0; } private Metric.Context getContext(Map<String, String> dimensions) { return contextMap.computeIfAbsent(dimensions, metric::createContext); } private void updateNodeCountMetrics(NodeList nodes) { Map<State, List<Node>> nodesByState = nodes.nodeType(NodeType.tenant).asList().stream() .collect(Collectors.groupingBy(Node::state)); for (State state : State.values()) { List<Node> nodesInState = nodesByState.getOrDefault(state, List.of()); metric.set("hostedVespa." + state.name() + "Hosts", nodesInState.size(), null); } } private void updateLockMetrics() { LockStats.getGlobal().getLockMetricsByPath() .forEach((lockPath, lockMetrics) -> { Metric.Context context = getContext(Map.of("lockPath", lockPath)); metric.set("lockAttempt.acquire", lockMetrics.getAndResetAcquireCount(), context); metric.set("lockAttempt.acquireFailed", lockMetrics.getAndResetAcquireFailedCount(), context); metric.set("lockAttempt.acquireTimedOut", lockMetrics.getAndResetAcquireTimedOutCount(), context); metric.set("lockAttempt.locked", lockMetrics.getAndResetAcquireSucceededCount(), context); metric.set("lockAttempt.release", lockMetrics.getAndResetReleaseCount(), context); metric.set("lockAttempt.releaseFailed", lockMetrics.getAndResetReleaseFailedCount(), context); metric.set("lockAttempt.reentry", lockMetrics.getAndResetReentryCount(), context); metric.set("lockAttempt.deadlock", lockMetrics.getAndResetDeadlockCount(), context); metric.set("lockAttempt.nakedRelease", lockMetrics.getAndResetNakedReleaseCount(), context); metric.set("lockAttempt.acquireWithoutRelease", lockMetrics.getAndResetAcquireWithoutReleaseCount(), context); metric.set("lockAttempt.foreignRelease", lockMetrics.getAndResetForeignReleaseCount(), context); setLockLatencyMetrics("acquire", lockMetrics.getAndResetAcquireLatencyMetrics(), context); setLockLatencyMetrics("locked", 
lockMetrics.getAndResetLockedLatencyMetrics(), context); }); } private void setLockLatencyMetrics(String name, LatencyMetrics latencyMetrics, Metric.Context context) { metric.set("lockAttempt." + name + "Latency", latencyMetrics.latencySeconds(), context); metric.set("lockAttempt." + name + "MaxActiveLatency", latencyMetrics.maxActiveLatencySeconds(), context); metric.set("lockAttempt." + name + "Hz", latencyMetrics.startHz(), context); metric.set("lockAttempt." + name + "Load", latencyMetrics.load(), context); } private void updateDockerMetrics(NodeList nodes) { NodeResources totalCapacity = getCapacityTotal(nodes); metric.set("hostedVespa.docker.totalCapacityCpu", totalCapacity.vcpu(), null); metric.set("hostedVespa.docker.totalCapacityMem", totalCapacity.memoryGb(), null); metric.set("hostedVespa.docker.totalCapacityDisk", totalCapacity.diskGb(), null); NodeResources totalFreeCapacity = getFreeCapacityTotal(nodes); metric.set("hostedVespa.docker.freeCapacityCpu", totalFreeCapacity.vcpu(), null); metric.set("hostedVespa.docker.freeCapacityMem", totalFreeCapacity.memoryGb(), null); metric.set("hostedVespa.docker.freeCapacityDisk", totalFreeCapacity.diskGb(), null); } private void updateTenantUsageMetrics(NodeList nodes) { nodes.nodeType(NodeType.tenant).stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> node.allocation().get().owner())) .forEach( (applicationId, applicationNodes) -> { var allocatedCapacity = applicationNodes.stream() .map(node -> node.allocation().get().requestedResources().justNumbers()) .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add); var context = getContext(dimensions(applicationId)); metric.set("hostedVespa.docker.allocatedCapacityCpu", allocatedCapacity.vcpu(), context); metric.set("hostedVespa.docker.allocatedCapacityMem", allocatedCapacity.memoryGb(), context); metric.set("hostedVespa.docker.allocatedCapacityDisk", allocatedCapacity.diskGb(), context); } ); } private static 
Map<String, String> dimensions(ApplicationId application) { return Map.of("tenantName", application.tenant().value(), "applicationId", application.serializedForm().replace(':', '.'), "app", toApp(application)); } private static NodeResources getCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(host -> host.flavor().resources()) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add); } private static NodeResources getFreeCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(n -> freeCapacityOf(nodes, n)) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add); } private static NodeResources freeCapacityOf(NodeList nodes, Node dockerHost) { return nodes.childrenOf(dockerHost).asList().stream() .map(node -> node.flavor().resources().justNumbers()) .reduce(dockerHost.flavor().resources().justNumbers(), NodeResources::subtract); } private static class ClusterKey { private final ApplicationId application; private final ClusterSpec.Id cluster; public ClusterKey(ApplicationId application, ClusterSpec.Id cluster) { this.application = application; this.cluster = cluster; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClusterKey that = (ClusterKey) o; return application.equals(that.application) && cluster.equals(that.cluster); } @Override public int hashCode() { return Objects.hash(application, cluster); } } }
/**
 * Maintainer that periodically publishes node-repository metrics: zone health, serializer/curator
 * cache statistics, pending redeployments, per-node state/version/generation gauges, service and
 * suspension status, lock statistics, host capacity totals, per-tenant allocated capacity, and
 * repair-ticket counts.
 *
 * Runs on the maintainer interval passed to the constructor; {@code maintain()} always reports success.
 */
class MetricsReporter extends NodeRepositoryMaintainer {

    private final Metric metric;
    private final Orchestrator orchestrator;
    private final ServiceMonitor serviceMonitor;
    // Cache of Metric.Context per dimension set, filled lazily by getContext(). Nothing in this
    // class evicts entries — assumes the set of dimension combinations stays bounded; TODO(review)
    // confirm cardinality (it includes per-host dimensions).
    private final Map<Map<String, String>, Metric.Context> contextMap = new HashMap<>();
    private final Supplier<Integer> pendingRedeploymentsSupplier;

    MetricsReporter(NodeRepository nodeRepository,
                    Metric metric,
                    Orchestrator orchestrator,
                    ServiceMonitor serviceMonitor,
                    Supplier<Integer> pendingRedeploymentsSupplier,
                    Duration interval) {
        super(nodeRepository, interval, metric);
        this.metric = metric;
        this.orchestrator = orchestrator;
        this.serviceMonitor = serviceMonitor;
        this.pendingRedeploymentsSupplier = pendingRedeploymentsSupplier;
    }

    /** Publishes every metric family from a single node-list and service-model snapshot. */
    @Override
    public boolean maintain() {
        NodeList nodes = nodeRepository().list();
        ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot();

        updateZoneMetrics();
        updateCacheMetrics();
        updateMaintenanceMetrics();
        nodes.forEach(node -> updateNodeMetrics(node, serviceModel));
        updateNodeCountMetrics(nodes);
        updateLockMetrics();
        updateDockerMetrics(nodes);
        updateTenantUsageMetrics(nodes);
        updateRepairTicketMetrics(nodes);
        // NOTE(review): updateAllocationMetrics is not defined within this excerpt — presumably
        // declared elsewhere in this file; confirm.
        updateAllocationMetrics(nodes);
        return true;
    }

    // 1 when the node repository reports itself working, else 0.
    private void updateZoneMetrics() {
        metric.set("zone.working", nodeRepository().isWorking() ? 1 : 0, null);
    }

    /** Publishes hit rate, eviction count and size for the node-serializer and curator caches. */
    private void updateCacheMetrics() {
        CacheStats nodeCacheStats = nodeRepository().database().nodeSerializerCacheStats();
        metric.set("cache.nodeObject.hitRate", nodeCacheStats.hitRate(), null);
        metric.set("cache.nodeObject.evictionCount", nodeCacheStats.evictionCount(), null);
        metric.set("cache.nodeObject.size", nodeCacheStats.size(), null);

        CacheStats curatorCacheStats = nodeRepository().database().cacheStats();
        metric.set("cache.curator.hitRate", curatorCacheStats.hitRate(), null);
        metric.set("cache.curator.evictionCount", curatorCacheStats.evictionCount(), null);
        metric.set("cache.curator.size", curatorCacheStats.size(), null);
    }

    // Number of redeployments still pending, as reported by the injected supplier.
    private void updateMaintenanceMetrics() {
        metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null);
    }

    /**
     * Publishes per-node gauges: restart/reboot generations, wanted/current Vespa versions,
     * retire/deprovision flags, orchestrator suspension status and service health counts.
     */
    private void updateNodeMetrics(Node node, ServiceModel serviceModel) {
        Metric.Context context;

        Optional<Allocation> allocation = node.allocation();
        if (allocation.isPresent()) {
            ApplicationId applicationId = allocation.get().owner();
            // Allocated nodes get application + cluster dimensions in addition to state/host.
            Map<String, String> dimensions = new HashMap<>(dimensions(applicationId));
            dimensions.put("state", node.state().name());
            dimensions.put("host", node.hostname());
            dimensions.put("clustertype", allocation.get().membership().cluster().type().name());
            dimensions.put("clusterid", allocation.get().membership().cluster().id().value());
            context = getContext(dimensions);

            long wantedRestartGeneration = allocation.get().restartGeneration().wanted();
            metric.set("wantedRestartGeneration", wantedRestartGeneration, context);
            long currentRestartGeneration = allocation.get().restartGeneration().current();
            metric.set("currentRestartGeneration", currentRestartGeneration, context);
            boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration;
            metric.set("wantToRestart", wantToRestart ? 1 : 0, context);

            metric.set("retired", allocation.get().membership().retired() ? 1 : 0, context);

            Version wantedVersion = allocation.get().membership().cluster().vespaVersion();
            double wantedVersionNumber = getVersionAsNumber(wantedVersion);
            metric.set("wantedVespaVersion", wantedVersionNumber, context);

            Optional<Version> currentVersion = node.status().vespaVersion();
            boolean converged = currentVersion.isPresent() &&
                                currentVersion.get().equals(wantedVersion);
            metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context);
        } else {
            // Unallocated nodes only carry state and host dimensions.
            context = getContext(Map.of("state", node.state().name(),
                                        "host", node.hostname()));
        }

        Optional<Version> currentVersion = node.status().vespaVersion();
        if (currentVersion.isPresent()) {
            double currentVersionNumber = getVersionAsNumber(currentVersion.get());
            metric.set("currentVespaVersion", currentVersionNumber, context);
        }

        long wantedRebootGeneration = node.status().reboot().wanted();
        metric.set("wantedRebootGeneration", wantedRebootGeneration, context);
        long currentRebootGeneration = node.status().reboot().current();
        metric.set("currentRebootGeneration", currentRebootGeneration, context);
        boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration;
        metric.set("wantToReboot", wantToReboot ? 1 : 0, context);

        metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context);
        metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context);
        // 1 when the node failer sees at least one reason to fail the parent host.
        metric.set("failReport", NodeFailer.reasonsToFailParentHost(node).isEmpty() ? 0 : 1, context);

        HostName hostname = new HostName(node.hostname());

        // Suspension status, when the orchestrator has host info for this node's application.
        serviceModel.getApplication(hostname)
                    .map(ApplicationInstance::reference)
                    .map(reference -> orchestrator.getHostInfo(reference, hostname))
                    .ifPresent(info -> {
                        int suspended = info.status().isSuspended() ? 1 : 0;
                        metric.set("suspended", suspended, context);
                        // Same value emitted under a second name — presumably a legacy alias; confirm.
                        metric.set("allowedToBeDown", suspended, context);
                        long suspendedSeconds = info.suspendedSince()
                                .map(suspendedSince -> Duration.between(suspendedSince, clock().instant()).getSeconds())
                                .orElse(0L);
                        metric.set("suspendedSeconds", suspendedSeconds, context);
                    });

        long numberOfServices;
        List<ServiceInstance> services = serviceModel.getServiceInstancesByHostName().get(hostname);
        if (services == null) {
            // No service info for this host: only the total (0) is emitted, no per-status counts.
            numberOfServices = 0;
        } else {
            Map<ServiceStatus, Long> servicesCount = services.stream().collect(
                    Collectors.groupingBy(ServiceInstance::serviceStatus, Collectors.counting()));

            numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum();

            metric.set(
                    "numberOfServicesUp",
                    servicesCount.getOrDefault(ServiceStatus.UP, 0L),
                    context);

            metric.set(
                    "numberOfServicesNotChecked",
                    servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L),
                    context);

            long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L);
            metric.set("numberOfServicesDown", numberOfServicesDown, context);

            metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context);

            // "Bad node" as judged by the health tracker: every service on the host is down.
            boolean down = NodeHealthTracker.allDown(services);
            metric.set("nodeFailerBadNode", (down ? 1 : 0), context);

            boolean nodeDownInNodeRepo = node.history().event(History.Event.Type.down).isPresent();
            metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context);
        }

        metric.set("numberOfServices", numberOfServices, context);
    }

    // Value of the "app" dimension: "application.instance".
    private static String toApp(ApplicationId applicationId) {
        return applicationId.application().value() + "." + applicationId.instance().value();
    }

    /**
     * A version 6.163.20 will be returned as a number 163.020. The major
     * version can normally be inferred. As long as the micro version stays
     * below 1000 these numbers sort like Version.
     */
    private static double getVersionAsNumber(Version version) {
        return version.getMinor() + version.getMicro() / 1000.0;
    }

    // Returns a cached context for this dimension set, creating and caching it on first use.
    private Metric.Context getContext(Map<String, String> dimensions) {
        return contextMap.computeIfAbsent(dimensions, metric::createContext);
    }

    /** Publishes a "hostedVespa.<state>Hosts" count for every node state, including empty ones. */
    private void updateNodeCountMetrics(NodeList nodes) {
        Map<State, List<Node>> nodesByState = nodes.nodeType(NodeType.tenant).asList().stream()
                                                   .collect(Collectors.groupingBy(Node::state));
        for (State state : State.values()) {
            List<Node> nodesInState = nodesByState.getOrDefault(state, List.of());
            metric.set("hostedVespa." + state.name() + "Hosts", nodesInState.size(), null);
        }
    }

    /**
     * Publishes per-lock-path counters and latency metrics from the global lock statistics.
     * The getAndReset* accessors clear the counters, so each tick reports activity since the last.
     */
    private void updateLockMetrics() {
        LockStats.getGlobal().getLockMetricsByPath()
                 .forEach((lockPath, lockMetrics) -> {
                     Metric.Context context = getContext(Map.of("lockPath", lockPath));

                     metric.set("lockAttempt.acquire", lockMetrics.getAndResetAcquireCount(), context);
                     metric.set("lockAttempt.acquireFailed", lockMetrics.getAndResetAcquireFailedCount(), context);
                     metric.set("lockAttempt.acquireTimedOut", lockMetrics.getAndResetAcquireTimedOutCount(), context);
                     metric.set("lockAttempt.locked", lockMetrics.getAndResetAcquireSucceededCount(), context);
                     metric.set("lockAttempt.release", lockMetrics.getAndResetReleaseCount(), context);
                     metric.set("lockAttempt.releaseFailed", lockMetrics.getAndResetReleaseFailedCount(), context);
                     metric.set("lockAttempt.reentry", lockMetrics.getAndResetReentryCount(), context);
                     metric.set("lockAttempt.deadlock", lockMetrics.getAndResetDeadlockCount(), context);
                     metric.set("lockAttempt.nakedRelease", lockMetrics.getAndResetNakedReleaseCount(), context);
                     metric.set("lockAttempt.acquireWithoutRelease", lockMetrics.getAndResetAcquireWithoutReleaseCount(), context);
                     metric.set("lockAttempt.foreignRelease", lockMetrics.getAndResetForeignReleaseCount(), context);

                     setLockLatencyMetrics("acquire", lockMetrics.getAndResetAcquireLatencyMetrics(), context);
                     setLockLatencyMetrics("locked", lockMetrics.getAndResetLockedLatencyMetrics(), context);
                 });
    }

    // Emits the latency/load gauges for one lock-metric family under the "lockAttempt.<name>*" prefix.
    private void setLockLatencyMetrics(String name, LatencyMetrics latencyMetrics, Metric.Context context) {
        metric.set("lockAttempt." + name + "Latency", latencyMetrics.latencySeconds(), context);
        metric.set("lockAttempt." + name + "MaxActiveLatency", latencyMetrics.maxActiveLatencySeconds(), context);
        metric.set("lockAttempt." + name + "Hz", latencyMetrics.startHz(), context);
        metric.set("lockAttempt." + name + "Load", latencyMetrics.load(), context);
    }

    /** Publishes total and free docker host capacity (cpu/memory/disk), without dimensions. */
    private void updateDockerMetrics(NodeList nodes) {
        NodeResources totalCapacity = getCapacityTotal(nodes);
        metric.set("hostedVespa.docker.totalCapacityCpu", totalCapacity.vcpu(), null);
        metric.set("hostedVespa.docker.totalCapacityMem", totalCapacity.memoryGb(), null);
        metric.set("hostedVespa.docker.totalCapacityDisk", totalCapacity.diskGb(), null);

        NodeResources totalFreeCapacity = getFreeCapacityTotal(nodes);
        metric.set("hostedVespa.docker.freeCapacityCpu", totalFreeCapacity.vcpu(), null);
        metric.set("hostedVespa.docker.freeCapacityMem", totalFreeCapacity.memoryGb(), null);
        metric.set("hostedVespa.docker.freeCapacityDisk", totalFreeCapacity.diskGb(), null);
    }

    /** Publishes per-application allocated capacity, summed over the requested resources of its tenant nodes. */
    private void updateTenantUsageMetrics(NodeList nodes) {
        nodes.nodeType(NodeType.tenant).stream()
             .filter(node -> node.allocation().isPresent())
             .collect(Collectors.groupingBy(node -> node.allocation().get().owner()))
             .forEach(
                     (applicationId, applicationNodes) -> {
                         // justNumbers() strips non-numeric resource attributes before summing.
                         // NOTE(review): 'any' appears to be a statically imported NodeResources
                         // disk-speed constant — import is outside this excerpt, confirm.
                         var allocatedCapacity = applicationNodes.stream()
                                 .map(node -> node.allocation().get().requestedResources().justNumbers())
                                 .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add);

                         var context = getContext(dimensions(applicationId));
                         metric.set("hostedVespa.docker.allocatedCapacityCpu", allocatedCapacity.vcpu(), context);
                         metric.set("hostedVespa.docker.allocatedCapacityMem", allocatedCapacity.memoryGb(), context);
                         metric.set("hostedVespa.docker.allocatedCapacityDisk", allocatedCapacity.diskGb(), context);
                     }
             );
    }

    /** Counts hosts carrying a "repairTicket" report, grouped by the ticket's "status" field. */
    private void updateRepairTicketMetrics(NodeList nodes) {
        nodes.nodeType(NodeType.host).stream()
             .map(node -> node.reports().getReport("repairTicket"))
             .flatMap(Optional::stream)
             .map(report -> report.getInspector().field("status").asString())
             .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()))
             .forEach((status, number) -> metric.set("hostedVespa.breakfixedHosts", number,
                                                     getContext(Map.of("status", status))));
    }

    // Dimension set used for application-scoped metric contexts. The applicationId dimension
    // replaces ':' with '.' in the serialized form.
    private static Map<String, String> dimensions(ApplicationId application) {
        return Map.of("tenantName", application.tenant().value(),
                      "applicationId", application.serializedForm().replace(':', '.'),
                      "app", toApp(application));
    }

    /** Sums the flavor resources of all active hosts. */
    private static NodeResources getCapacityTotal(NodeList nodes) {
        return nodes.hosts().state(State.active).asList().stream()
                    .map(host -> host.flavor().resources())
                    .map(NodeResources::justNumbers)
                    .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add);
    }

    /** Sums the unallocated resources over all active hosts. */
    private static NodeResources getFreeCapacityTotal(NodeList nodes) {
        return nodes.hosts().state(State.active).asList().stream()
                    .map(n -> freeCapacityOf(nodes, n))
                    .map(NodeResources::justNumbers)
                    .reduce(new NodeResources(0, 0, 0, 0, any), NodeResources::add);
    }

    /** Free capacity of one host: its flavor resources minus the resources of each child node. */
    private static NodeResources freeCapacityOf(NodeList nodes, Node dockerHost) {
        return nodes.childrenOf(dockerHost).asList().stream()
                    .map(node -> node.flavor().resources().justNumbers())
                    .reduce(dockerHost.flavor().resources().justNumbers(), NodeResources::subtract);
    }

    /**
     * Value object identifying an application cluster, suitable as a hash key.
     * NOTE(review): not referenced within this excerpt — presumably used by
     * updateAllocationMetrics, which is declared elsewhere in this file; confirm.
     */
    private static class ClusterKey {

        private final ApplicationId application;
        private final ClusterSpec.Id cluster;

        public ClusterKey(ApplicationId application, ClusterSpec.Id cluster) {
            this.application = application;
            this.cluster = cluster;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            ClusterKey that = (ClusterKey) o;
            return application.equals(that.application) &&
                   cluster.equals(that.cluster);
        }

        @Override
        public int hashCode() {
            return Objects.hash(application, cluster);
        }

    }

}