comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Yes, there are no exclave deployments currently and that method will return false since the default cloud account is `CloudAccount.empty`, which will result in the old behavior
protected double maintain() { Map<ZoneId, Set<TenantName>> tenantsByZone = new HashMap<>(); Map<ZoneId, Set<CloudAccount>> accountsByZone = new HashMap<>(); controller().zoneRegistry().zonesIncludingSystem().reachable().zones().forEach(zone -> { tenantsByZone.put(zone.getVirtualId(), new HashSet<>(INFRASTRUCTURE_TENANTS)); accountsByZone.put(zone.getVirtualId(), new HashSet<>()); }); for (var application : applications.asList()) { for (var instance : application.instances().values()) { for (var deployment : instance.deployments().values()) { if (zoneRegistry.isEnclave(deployment.cloudAccount())) accountsByZone.get(deployment.zone()).add(deployment.cloudAccount()); else tenantsByZone.get(deployment.zone()).add(instance.id().tenant()); } } } int failures = 0; for (ZoneId zone : tenantsByZone.keySet()) { try { ArchiveUris zoneArchiveUris = nodeRepository.getArchiveUris(zone); Stream.of( tenantsByZone.get(zone).stream() .flatMap(tenant -> archiveBucketDb.archiveUriFor(zone, tenant, true) .filter(uri -> !uri.equals(zoneArchiveUris.tenantArchiveUris().get(tenant))) .map(uri -> ArchiveUriUpdate.setArchiveUriFor(tenant, uri)) .stream()), accountsByZone.get(zone).stream() .flatMap(account -> archiveBucketDb.archiveUriFor(zone, account, true) .filter(uri -> !uri.equals(zoneArchiveUris.accountArchiveUris().get(account))) .map(uri -> ArchiveUriUpdate.setArchiveUriFor(account, uri)) .stream()), zoneArchiveUris.tenantArchiveUris().keySet().stream() .filter(tenant -> !tenantsByZone.get(zone).contains(tenant)) .map(ArchiveUriUpdate::deleteArchiveUriFor), zoneArchiveUris.accountArchiveUris().keySet().stream() .filter(account -> !accountsByZone.get(zone).contains(account)) .map(ArchiveUriUpdate::deleteArchiveUriFor)) .flatMap(s -> s) .forEach(update -> nodeRepository.updateArchiveUri(zone, update)); } catch (Exception e) { log.log(Level.WARNING, "Failed to update archive URI in " + zone + ". Retrying in " + interval() + ". 
Error: " + Exceptions.toMessageString(e)); failures++; } } return asSuccessFactor(tenantsByZone.size(), failures); }
else tenantsByZone.get(deployment.zone()).add(instance.id().tenant());
protected double maintain() { Map<ZoneId, Set<TenantName>> tenantsByZone = new HashMap<>(); Map<ZoneId, Set<CloudAccount>> accountsByZone = new HashMap<>(); controller().zoneRegistry().zonesIncludingSystem().reachable().zones().forEach(zone -> { tenantsByZone.put(zone.getVirtualId(), new HashSet<>(INFRASTRUCTURE_TENANTS)); accountsByZone.put(zone.getVirtualId(), new HashSet<>()); }); for (var application : applications.asList()) { for (var instance : application.instances().values()) { for (var deployment : instance.deployments().values()) { if (zoneRegistry.isEnclave(deployment.cloudAccount())) accountsByZone.get(deployment.zone()).add(deployment.cloudAccount()); else tenantsByZone.get(deployment.zone()).add(instance.id().tenant()); } } } int failures = 0; for (ZoneId zone : tenantsByZone.keySet()) { try { ArchiveUris zoneArchiveUris = nodeRepository.getArchiveUris(zone); Stream.of( tenantsByZone.get(zone).stream() .flatMap(tenant -> archiveBucketDb.archiveUriFor(zone, tenant, true) .filter(uri -> !uri.equals(zoneArchiveUris.tenantArchiveUris().get(tenant))) .map(uri -> ArchiveUriUpdate.setArchiveUriFor(tenant, uri)) .stream()), accountsByZone.get(zone).stream() .flatMap(account -> archiveBucketDb.archiveUriFor(zone, account, true) .filter(uri -> !uri.equals(zoneArchiveUris.accountArchiveUris().get(account))) .map(uri -> ArchiveUriUpdate.setArchiveUriFor(account, uri)) .stream()), zoneArchiveUris.tenantArchiveUris().keySet().stream() .filter(tenant -> !tenantsByZone.get(zone).contains(tenant)) .map(ArchiveUriUpdate::deleteArchiveUriFor), zoneArchiveUris.accountArchiveUris().keySet().stream() .filter(account -> !accountsByZone.get(zone).contains(account)) .map(ArchiveUriUpdate::deleteArchiveUriFor)) .flatMap(s -> s) .forEach(update -> nodeRepository.updateArchiveUri(zone, update)); } catch (Exception e) { log.log(Level.WARNING, "Failed to update archive URI in " + zone + ". Retrying in " + interval() + ". 
Error: " + Exceptions.toMessageString(e)); failures++; } } return asSuccessFactor(tenantsByZone.size(), failures); }
class ArchiveUriUpdater extends ControllerMaintainer { private static final Set<TenantName> INFRASTRUCTURE_TENANTS = Set.of(SystemApplication.TENANT); private final ApplicationController applications; private final NodeRepository nodeRepository; private final CuratorArchiveBucketDb archiveBucketDb; private final ZoneRegistry zoneRegistry; public ArchiveUriUpdater(Controller controller, Duration interval) { super(controller, interval); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); this.archiveBucketDb = controller.archiveBucketDb(); this.zoneRegistry = controller.zoneRegistry(); } @Override }
class ArchiveUriUpdater extends ControllerMaintainer { private static final Set<TenantName> INFRASTRUCTURE_TENANTS = Set.of(SystemApplication.TENANT); private final ApplicationController applications; private final NodeRepository nodeRepository; private final CuratorArchiveBucketDb archiveBucketDb; private final ZoneRegistry zoneRegistry; public ArchiveUriUpdater(Controller controller, Duration interval) { super(controller, interval); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); this.archiveBucketDb = controller.archiveBucketDb(); this.zoneRegistry = controller.zoneRegistry(); } @Override }
This was actually a bug-fix :)
public LoadBalancerInstance with(Set<Real> reals, ZoneEndpoint settings, Optional<PrivateServiceId> serviceId) { List<PrivateServiceId> ids = new ArrayList<>(serviceIds); serviceId.filter(id -> ! ids.contains(id)).ifPresent(ids::add); return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, ids, cloudAccount); }
List<PrivateServiceId> ids = new ArrayList<>(serviceIds);
public LoadBalancerInstance with(Set<Real> reals, ZoneEndpoint settings, Optional<PrivateServiceId> serviceId) { List<PrivateServiceId> ids = new ArrayList<>(serviceIds); serviceId.filter(id -> ! ids.contains(id)).ifPresent(ids::add); return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, ids, cloudAccount); }
class LoadBalancerInstance { private final Optional<DomainName> hostname; private final Optional<String> ipAddress; private final Optional<DnsZone> dnsZone; private final Set<Integer> ports; private final Set<String> networks; private final Set<Real> reals; private final ZoneEndpoint settings; private final List<PrivateServiceId> serviceIds; private final CloudAccount cloudAccount; public LoadBalancerInstance(Optional<DomainName> hostname, Optional<String> ipAddress, Optional<DnsZone> dnsZone, Set<Integer> ports, Set<String> networks, Set<Real> reals, ZoneEndpoint settings, List<PrivateServiceId> serviceIds, CloudAccount cloudAccount) { this.hostname = Objects.requireNonNull(hostname, "hostname must be non-null"); this.ipAddress = Objects.requireNonNull(ipAddress, "ip must be non-null"); this.dnsZone = Objects.requireNonNull(dnsZone, "dnsZone must be non-null"); this.ports = ImmutableSortedSet.copyOf(requirePorts(ports)); this.networks = ImmutableSortedSet.copyOf(Objects.requireNonNull(networks, "networks must be non-null")); this.reals = ImmutableSortedSet.copyOf(Objects.requireNonNull(reals, "targets must be non-null")); this.settings = Objects.requireNonNull(settings, "settings must be non-null"); this.serviceIds = List.copyOf(Objects.requireNonNull(serviceIds, "private service id must be non-null")); this.cloudAccount = Objects.requireNonNull(cloudAccount, "cloudAccount must be non-null"); if (hostname.isEmpty() == ipAddress.isEmpty()) { throw new IllegalArgumentException("Exactly 1 of hostname=%s and ipAddress=%s must be set".formatted( hostname.map(DomainName::value).orElse("<empty>"), ipAddress.orElse("<empty>"))); } } /** Fully-qualified domain name of this load balancer. 
This hostname can be used for query and feed */ public Optional<DomainName> hostname() { return hostname; } /** IP address of this (public) load balancer */ public Optional<String> ipAddress() { return ipAddress; } /** ID of the DNS zone associated with this */ public Optional<DnsZone> dnsZone() { return dnsZone; } /** Listening port(s) of this load balancer */ public Set<Integer> ports() { return ports; } /** Networks (CIDR blocks) of this load balancer */ public Set<String> networks() { return networks; } /** Real servers behind this load balancer */ public Set<Real> reals() { return reals; } /** Static user-configured settings of this load balancer */ public ZoneEndpoint settings() { return settings; } /** ID of any private endpoint service configured for this load balancer. */ public Optional<PrivateServiceId> serviceId() { return serviceIds.isEmpty() ? Optional.empty() : Optional.of(serviceIds.get(serviceIds.size() - 1)); } public List<PrivateServiceId> serviceIds() { return serviceIds; } /** Cloud account of this load balancer */ public CloudAccount cloudAccount() { return cloudAccount; } private static Set<Integer> requirePorts(Set<Integer> ports) { Objects.requireNonNull(ports, "ports must be non-null"); if (ports.isEmpty()) { throw new IllegalArgumentException("ports must be non-empty"); } if (!ports.stream().allMatch(port -> port >= 1 && port <= 65535)) { throw new IllegalArgumentException("all ports must be >= 1 and <= 65535"); } return ports; } /** Updates this with new data, from a reconfiguration. */ /** Prepends the given service IDs, possibly changing the order of those we have in this. */ public LoadBalancerInstance withServiceIds(List<PrivateServiceId> serviceIds) { List<PrivateServiceId> ids = new ArrayList<>(serviceIds); for (PrivateServiceId id : this.serviceIds) if ( ! ids.contains(id)) ids.add(id); return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, ids, cloudAccount); } }
class LoadBalancerInstance { private final Optional<DomainName> hostname; private final Optional<String> ipAddress; private final Optional<DnsZone> dnsZone; private final Set<Integer> ports; private final Set<String> networks; private final Set<Real> reals; private final ZoneEndpoint settings; private final List<PrivateServiceId> serviceIds; private final CloudAccount cloudAccount; public LoadBalancerInstance(Optional<DomainName> hostname, Optional<String> ipAddress, Optional<DnsZone> dnsZone, Set<Integer> ports, Set<String> networks, Set<Real> reals, ZoneEndpoint settings, List<PrivateServiceId> serviceIds, CloudAccount cloudAccount) { this.hostname = Objects.requireNonNull(hostname, "hostname must be non-null"); this.ipAddress = Objects.requireNonNull(ipAddress, "ip must be non-null"); this.dnsZone = Objects.requireNonNull(dnsZone, "dnsZone must be non-null"); this.ports = ImmutableSortedSet.copyOf(requirePorts(ports)); this.networks = ImmutableSortedSet.copyOf(Objects.requireNonNull(networks, "networks must be non-null")); this.reals = ImmutableSortedSet.copyOf(Objects.requireNonNull(reals, "targets must be non-null")); this.settings = Objects.requireNonNull(settings, "settings must be non-null"); this.serviceIds = List.copyOf(Objects.requireNonNull(serviceIds, "private service id must be non-null")); this.cloudAccount = Objects.requireNonNull(cloudAccount, "cloudAccount must be non-null"); if (hostname.isEmpty() == ipAddress.isEmpty()) { throw new IllegalArgumentException("Exactly 1 of hostname=%s and ipAddress=%s must be set".formatted( hostname.map(DomainName::value).orElse("<empty>"), ipAddress.orElse("<empty>"))); } } /** Fully-qualified domain name of this load balancer. 
This hostname can be used for query and feed */ public Optional<DomainName> hostname() { return hostname; } /** IP address of this (public) load balancer */ public Optional<String> ipAddress() { return ipAddress; } /** ID of the DNS zone associated with this */ public Optional<DnsZone> dnsZone() { return dnsZone; } /** Listening port(s) of this load balancer */ public Set<Integer> ports() { return ports; } /** Networks (CIDR blocks) of this load balancer */ public Set<String> networks() { return networks; } /** Real servers behind this load balancer */ public Set<Real> reals() { return reals; } /** Static user-configured settings of this load balancer */ public ZoneEndpoint settings() { return settings; } /** ID of any private endpoint service configured for this load balancer. */ public Optional<PrivateServiceId> serviceId() { return serviceIds.isEmpty() ? Optional.empty() : Optional.of(serviceIds.get(serviceIds.size() - 1)); } public List<PrivateServiceId> serviceIds() { return serviceIds; } /** Cloud account of this load balancer */ public CloudAccount cloudAccount() { return cloudAccount; } private static Set<Integer> requirePorts(Set<Integer> ports) { Objects.requireNonNull(ports, "ports must be non-null"); if (ports.isEmpty()) { throw new IllegalArgumentException("ports must be non-empty"); } if (!ports.stream().allMatch(port -> port >= 1 && port <= 65535)) { throw new IllegalArgumentException("all ports must be >= 1 and <= 65535"); } return ports; } /** Updates this with new data, from a reconfiguration. */ /** Prepends the given service IDs, possibly changing the order of those we have in this. */ public LoadBalancerInstance withServiceIds(List<PrivateServiceId> serviceIds) { List<PrivateServiceId> ids = new ArrayList<>(serviceIds); for (PrivateServiceId id : this.serviceIds) if ( ! ids.contains(id)) ids.add(id); return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, ids, cloudAccount); } }
Out of curiosity, where did the test fail?
void application_generation_metric() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component(ComponentWithMetrics.class))))) { var component = (ComponentWithMetrics)app.getComponentById(ComponentWithMetrics.class.getName()); assertNotNull(component); var metrics = (SimpleMetricConsumer)component.metrics().newInstance(); assertNotNull(metrics); int maxWaitMs = 10000; Bucket snapshot = null; while (maxWaitMs-- > 0 && ( snapshot = metrics.receiver().getSnapshot() ) == null) { Thread.sleep(1); } assertNotNull(snapshot); assertEquals(1, snapshot.getValuesForMetric("application_generation").size()); assertEquals(0, snapshot.getValuesForMetric("application_generation").iterator().next().getValue().getLast()); } }
assertEquals(0, snapshot.getValuesForMetric("application_generation").iterator().next().getValue().getLast());
void application_generation_metric() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component(ComponentWithMetrics.class))))) { var component = (ComponentWithMetrics)app.getComponentById(ComponentWithMetrics.class.getName()); assertNotNull(component); var metrics = (SimpleMetricConsumer)component.metrics().newInstance(); assertNotNull(metrics); int maxWaitMs = 10000; Bucket snapshot = null; while (maxWaitMs-- > 0 && ( snapshot = metrics.receiver().getSnapshot() ) == null) { Thread.sleep(1); } assertNotNull(snapshot); assertEquals(1, snapshot.getValuesForMetric("application_generation").size()); assertEquals(0, snapshot.getValuesForMetric("application_generation").iterator().next().getValue().getLast()); } }
class ApplicationTest { @Test void minimal_application_can_be_constructed() { try (Application application = Application.fromServicesXml("<container version=\"1.0\"/>", Networking.disable)) { Application unused = application; } } /** Tests that an application with search chains referencing a content cluster can be constructed. */ @Test void container_and_referenced_content() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/withcontent"), Networking.disable)) { Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"), new Query("?query=substring:foobar&timeout=20000")); assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString()); } } @Test void application_with_query_profile_sets_up_query_profile_registry() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/withqueryprofile"), Networking.disable)) { Query query = new Query(HttpRequest.createTestRequest("?query=substring:foobar&timeout=20000", com.yahoo.jdisc.http.HttpRequest.Method.GET), application.getCompiledQueryProfileRegistry().findQueryProfile("default")); Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"), query); assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString()); assertEquals("2", application.getCompiledQueryProfileRegistry().findQueryProfile("default").get("hits")); assertEquals("select * from sources * where weakAnd(substring contains \"foobar\") limit 2 timeout 20000000", result.getQuery().yqlRepresentation(true)); } } private void printTrace(Result result) { for (String message : result.getQuery().getContext(true).getTrace().traceNode().descendants(String.class)) 
System.out.println(message); } @Test void empty_container() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container())))) { try { app.process(new DocumentRemove(null)); fail("expected exception"); } catch (Exception ignore) { } try { app.process(new Processing()); fail("expected exception"); } catch (Exception ignore) { } try { app.search(new Query("?foo")); fail("expected exception"); } catch (Exception ignore) { } } } @Test void config() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .documentProcessor("docproc", "default", MockDocproc.class) .config(new MockApplicationConfig(new MockApplicationConfig.Builder() .mystruct(new MockApplicationConfig.Mystruct.Builder().id("structid").value("structvalue")) .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid1").value("listvalue1")) .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid2").value("listvalue2")) .mylist("item1") .mylist("item2") .mymap("key1", "value1") .mymap("key2", "value2") .mymapstruct("key1", new MockApplicationConfig.Mymapstruct.Builder().id("mapid1").value("mapvalue1")) .mymapstruct("key2", new MockApplicationConfig.Mymapstruct.Builder().id("mapid2").value("mapvalue2"))))))) ) { MockDocproc docproc = (MockDocproc) app.getComponentById("docproc@default"); assertNotNull(docproc); assertEquals(docproc.getConfig().mystruct().id(), "structid"); assertEquals(docproc.getConfig().mystruct().value(), "structvalue"); assertEquals(docproc.getConfig().mystructlist().size(), 2); assertEquals(docproc.getConfig().mystructlist().get(0).id(), "listid1"); assertEquals(docproc.getConfig().mystructlist().get(0).value(), "listvalue1"); assertEquals(docproc.getConfig().mystructlist().get(1).id(), "listid2"); 
assertEquals(docproc.getConfig().mystructlist().get(1).value(), "listvalue2"); assertEquals(docproc.getConfig().mylist().size(), 2); assertEquals(docproc.getConfig().mylist().get(0), "item1"); assertEquals(docproc.getConfig().mylist().get(1), "item2"); assertEquals(docproc.getConfig().mymap().size(), 2); assertTrue(docproc.getConfig().mymap().containsKey("key1")); assertEquals(docproc.getConfig().mymap().get("key1"), "value1"); assertTrue(docproc.getConfig().mymap().containsKey("key2")); assertEquals(docproc.getConfig().mymap().get("key2"), "value2"); assertEquals(docproc.getConfig().mymapstruct().size(), 2); assertTrue(docproc.getConfig().mymapstruct().containsKey("key1")); assertEquals(docproc.getConfig().mymapstruct().get("key1").id(), "mapid1"); assertEquals(docproc.getConfig().mymapstruct().get("key1").value(), "mapvalue1"); assertTrue(docproc.getConfig().mymapstruct().containsKey("key2")); assertEquals(docproc.getConfig().mymapstruct().get("key2").id(), "mapid2"); assertEquals(docproc.getConfig().mymapstruct().get("key2").value(), "mapvalue2"); } } @Test void handler() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .handler("http: ) { RequestHandler handler = app.getRequestHandlerById(MockHttpHandler.class.getName()); assertNotNull(handler); Request request = new Request("http: Response response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); request = new Request("http: response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); request = new Request("http: response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); } } @Test void renderer() throws 
Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .renderer("mock", MockRenderer.class)))) ) { Request request = new Request("http: Response response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "<mock hits=\"0\" />"); } } @Test void search_default() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .searcher(MockSearcher.class)))) ) { Result result = app.search(new Query("?query=foo&timeout=20000")); assertEquals(1, result.hits().size()); } } @Test void search() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .searcher("foo", MockSearcher.class)))) ) { Result result = app.search("foo", new Query("?query=foo&timeout=20000")); assertEquals(1, result.hits().size()); } } @Test void document_type() throws Exception { try ( Application app = Application.fromBuilder(new Application.Builder() .documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8)) .container("default", new Application.Builder.Container() .documentProcessor(MockDocproc.class) .config(new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar")))))) ) { Map<String, DocumentType> typeMap = app.getJDisc("jdisc").documentProcessing().getDocumentTypes(); assertNotNull(typeMap); assertTrue(typeMap.containsKey("test")); } } @Test void get_search_handler() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new 
Application.Builder.Container().search(true))))) { SearchHandler searchHandler = (SearchHandler) app.getRequestHandlerById("com.yahoo.search.handler.SearchHandler"); assertNotNull(searchHandler); } } @Test void component() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component(MockSearcher.class))))) { Component c = app.getComponentById(MockSearcher.class.getName()); assertNotNull(c); } } @Test @Test void component_with_config() throws Exception { MockApplicationConfig config = new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar"))); try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component("foo", MockDocproc.class, config))))) { Component c = app.getComponentById("foo"); assertNotNull(c); } } @Test void file_distribution() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/filedistribution/"), Networking.disable)) { Application unused = application; } } @Test void server() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .server("foo", MockServer.class))) )) { MockServer server = (MockServer) app.getServerById("foo"); assertNotNull(server); assertTrue(server.isStarted()); } } @Test void query_profile() throws Exception { try (Application app = Application.fromBuilder(new Application.Builder() .queryProfile("default", "<query-profile id=\"default\">\n" + "<field name=\"defaultage\">7d</field>\n" + "</query-profile>") .queryProfileType("type", "<query-profile-type id=\"type\">\n" + "<field name=\"defaultage\" type=\"string\" />\n" + "</query-profile-type>") 
.rankExpression("re", "commonfirstphase(globalstaticrank)") .documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8)) .container("default", new Application.Builder.Container() .search(true) ))) { Application unused = app; } } @Test void http_interface_is_off_when_networking_is_disabled() throws Exception { assertThrows(ConnectException.class, () -> { int httpPort = getFreePort(); try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.disable)) { HttpClient client = new org.apache.http.impl.client.DefaultHttpClient(); int statusCode = client.execute(new HttpGet("http: fail("Networking.disable is specified, but the network interface is enabled! Got status code: " + statusCode); Application unused = application; } }); } @Test void http_interface_is_on_when_networking_is_enabled() throws Exception { int httpPort = getFreePort(); try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.enable)) { HttpClient client = new org.apache.http.impl.client.DefaultHttpClient(); HttpResponse response = client.execute(new HttpGet("http: assertEquals(200, response.getStatusLine().getStatusCode()); BufferedReader r = new BufferedReader(new InputStreamReader(response.getEntity().getContent())); String line; StringBuilder sb = new StringBuilder(); while ((line = r.readLine()) != null) { sb.append(line).append("\n"); } assertTrue(sb.toString().contains("Handler")); Application unused = application; } } @Test void athenz_in_deployment_xml() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/athenz-in-deployment-xml/"), Networking.disable)) { Application unused = application; } } private static int getFreePort() throws IOException { try (ServerSocket socket = new ServerSocket(0)) { socket.setReuseAddress(true); return socket.getLocalPort(); } } private static String 
servicesXmlWithServer(int port) { return "<container version='1.0'>" + " <http> <server port='" + port +"' id='foo'/> </http>" + " <accesslog type=\"disabled\" />" + "</container>"; } @Test void application_with_access_control_can_be_constructed() { try (Application application = Application.fromServicesXml(servicesXmlWithAccessControl(), Networking.disable)) { Application unused = application; } } private static String servicesXmlWithAccessControl() { return "<container version='1.0'>" + " <http> <server port='" + 0 +"' id='foo'/> " + " <filtering>" + " <access-control domain='foo' />" + " </filtering>" + " </http>" + " <accesslog type=\"disabled\" />" + "</container>"; } }
class ApplicationTest { @Test void minimal_application_can_be_constructed() { try (Application application = Application.fromServicesXml("<container version=\"1.0\"/>", Networking.disable)) { Application unused = application; } } /** Tests that an application with search chains referencing a content cluster can be constructed. */ @Test void container_and_referenced_content() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/withcontent"), Networking.disable)) { Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"), new Query("?query=substring:foobar&timeout=20000")); assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString()); } } @Test void application_with_query_profile_sets_up_query_profile_registry() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/withqueryprofile"), Networking.disable)) { Query query = new Query(HttpRequest.createTestRequest("?query=substring:foobar&timeout=20000", com.yahoo.jdisc.http.HttpRequest.Method.GET), application.getCompiledQueryProfileRegistry().findQueryProfile("default")); Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"), query); assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString()); assertEquals("2", application.getCompiledQueryProfileRegistry().findQueryProfile("default").get("hits")); assertEquals("select * from sources * where weakAnd(substring contains \"foobar\") limit 2 timeout 20000000", result.getQuery().yqlRepresentation(true)); } } private void printTrace(Result result) { for (String message : result.getQuery().getContext(true).getTrace().traceNode().descendants(String.class)) 
System.out.println(message); } @Test void empty_container() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container())))) { try { app.process(new DocumentRemove(null)); fail("expected exception"); } catch (Exception ignore) { } try { app.process(new Processing()); fail("expected exception"); } catch (Exception ignore) { } try { app.search(new Query("?foo")); fail("expected exception"); } catch (Exception ignore) { } } } @Test void config() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .documentProcessor("docproc", "default", MockDocproc.class) .config(new MockApplicationConfig(new MockApplicationConfig.Builder() .mystruct(new MockApplicationConfig.Mystruct.Builder().id("structid").value("structvalue")) .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid1").value("listvalue1")) .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid2").value("listvalue2")) .mylist("item1") .mylist("item2") .mymap("key1", "value1") .mymap("key2", "value2") .mymapstruct("key1", new MockApplicationConfig.Mymapstruct.Builder().id("mapid1").value("mapvalue1")) .mymapstruct("key2", new MockApplicationConfig.Mymapstruct.Builder().id("mapid2").value("mapvalue2"))))))) ) { MockDocproc docproc = (MockDocproc) app.getComponentById("docproc@default"); assertNotNull(docproc); assertEquals(docproc.getConfig().mystruct().id(), "structid"); assertEquals(docproc.getConfig().mystruct().value(), "structvalue"); assertEquals(docproc.getConfig().mystructlist().size(), 2); assertEquals(docproc.getConfig().mystructlist().get(0).id(), "listid1"); assertEquals(docproc.getConfig().mystructlist().get(0).value(), "listvalue1"); assertEquals(docproc.getConfig().mystructlist().get(1).id(), "listid2"); 
assertEquals(docproc.getConfig().mystructlist().get(1).value(), "listvalue2"); assertEquals(docproc.getConfig().mylist().size(), 2); assertEquals(docproc.getConfig().mylist().get(0), "item1"); assertEquals(docproc.getConfig().mylist().get(1), "item2"); assertEquals(docproc.getConfig().mymap().size(), 2); assertTrue(docproc.getConfig().mymap().containsKey("key1")); assertEquals(docproc.getConfig().mymap().get("key1"), "value1"); assertTrue(docproc.getConfig().mymap().containsKey("key2")); assertEquals(docproc.getConfig().mymap().get("key2"), "value2"); assertEquals(docproc.getConfig().mymapstruct().size(), 2); assertTrue(docproc.getConfig().mymapstruct().containsKey("key1")); assertEquals(docproc.getConfig().mymapstruct().get("key1").id(), "mapid1"); assertEquals(docproc.getConfig().mymapstruct().get("key1").value(), "mapvalue1"); assertTrue(docproc.getConfig().mymapstruct().containsKey("key2")); assertEquals(docproc.getConfig().mymapstruct().get("key2").id(), "mapid2"); assertEquals(docproc.getConfig().mymapstruct().get("key2").value(), "mapvalue2"); } } @Test void handler() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .handler("http: ) { RequestHandler handler = app.getRequestHandlerById(MockHttpHandler.class.getName()); assertNotNull(handler); Request request = new Request("http: Response response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); request = new Request("http: response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); request = new Request("http: response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); } } @Test void renderer() throws 
Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .renderer("mock", MockRenderer.class)))) ) { Request request = new Request("http: Response response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "<mock hits=\"0\" />"); } } @Test void search_default() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .searcher(MockSearcher.class)))) ) { Result result = app.search(new Query("?query=foo&timeout=20000")); assertEquals(1, result.hits().size()); } } @Test void search() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .searcher("foo", MockSearcher.class)))) ) { Result result = app.search("foo", new Query("?query=foo&timeout=20000")); assertEquals(1, result.hits().size()); } } @Test void document_type() throws Exception { try ( Application app = Application.fromBuilder(new Application.Builder() .documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8)) .container("default", new Application.Builder.Container() .documentProcessor(MockDocproc.class) .config(new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar")))))) ) { Map<String, DocumentType> typeMap = app.getJDisc("jdisc").documentProcessing().getDocumentTypes(); assertNotNull(typeMap); assertTrue(typeMap.containsKey("test")); } } @Test void get_search_handler() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new 
Application.Builder.Container().search(true))))) { SearchHandler searchHandler = (SearchHandler) app.getRequestHandlerById("com.yahoo.search.handler.SearchHandler"); assertNotNull(searchHandler); } } @Test void component() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component(MockSearcher.class))))) { Component c = app.getComponentById(MockSearcher.class.getName()); assertNotNull(c); } } @Test @Test void component_with_config() throws Exception { MockApplicationConfig config = new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar"))); try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component("foo", MockDocproc.class, config))))) { Component c = app.getComponentById("foo"); assertNotNull(c); } } @Test void file_distribution() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/filedistribution/"), Networking.disable)) { Application unused = application; } } @Test void server() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .server("foo", MockServer.class))) )) { MockServer server = (MockServer) app.getServerById("foo"); assertNotNull(server); assertTrue(server.isStarted()); } } @Test void query_profile() throws Exception { try (Application app = Application.fromBuilder(new Application.Builder() .queryProfile("default", "<query-profile id=\"default\">\n" + "<field name=\"defaultage\">7d</field>\n" + "</query-profile>") .queryProfileType("type", "<query-profile-type id=\"type\">\n" + "<field name=\"defaultage\" type=\"string\" />\n" + "</query-profile-type>") 
.rankExpression("re", "commonfirstphase(globalstaticrank)") .documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8)) .container("default", new Application.Builder.Container() .search(true) ))) { Application unused = app; } } @Test void http_interface_is_off_when_networking_is_disabled() throws Exception { assertThrows(ConnectException.class, () -> { int httpPort = getFreePort(); try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.disable)) { HttpClient client = new org.apache.http.impl.client.DefaultHttpClient(); int statusCode = client.execute(new HttpGet("http: fail("Networking.disable is specified, but the network interface is enabled! Got status code: " + statusCode); Application unused = application; } }); } @Test void http_interface_is_on_when_networking_is_enabled() throws Exception { int httpPort = getFreePort(); try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.enable)) { HttpClient client = new org.apache.http.impl.client.DefaultHttpClient(); HttpResponse response = client.execute(new HttpGet("http: assertEquals(200, response.getStatusLine().getStatusCode()); BufferedReader r = new BufferedReader(new InputStreamReader(response.getEntity().getContent())); String line; StringBuilder sb = new StringBuilder(); while ((line = r.readLine()) != null) { sb.append(line).append("\n"); } assertTrue(sb.toString().contains("Handler")); Application unused = application; } } @Test void athenz_in_deployment_xml() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/athenz-in-deployment-xml/"), Networking.disable)) { Application unused = application; } } private static int getFreePort() throws IOException { try (ServerSocket socket = new ServerSocket(0)) { socket.setReuseAddress(true); return socket.getLocalPort(); } } private static String 
servicesXmlWithServer(int port) { return "<container version='1.0'>" + " <http> <server port='" + port +"' id='foo'/> </http>" + " <accesslog type=\"disabled\" />" + "</container>"; } @Test void application_with_access_control_can_be_constructed() { try (Application application = Application.fromServicesXml(servicesXmlWithAccessControl(), Networking.disable)) { Application unused = application; } } private static String servicesXmlWithAccessControl() { return "<container version='1.0'>" + " <http> <server port='" + 0 +"' id='foo'/> " + " <filtering>" + " <access-control domain='foo' />" + " </filtering>" + " </http>" + " <accesslog type=\"disabled\" />" + "</container>"; } }
Re line 282: the test never received the metric. In a real setup the metric is still never received either, so this change did not solve the underlying problem.
// Verifies that a container component's metric consumer eventually receives a snapshot
// carrying exactly one "application_generation" metric, whose latest value is 0.
void application_generation_metric() throws Exception {
    try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
            .component(ComponentWithMetrics.class))))) {
        var component = (ComponentWithMetrics) app.getComponentById(ComponentWithMetrics.class.getName());
        assertNotNull(component);

        var metrics = (SimpleMetricConsumer) component.metrics().newInstance();
        assertNotNull(metrics);

        // Snapshots are produced asynchronously: poll for up to ~10 seconds.
        int remainingMs = 10000;
        Bucket snapshot = null;
        while (remainingMs-- > 0 && (snapshot = metrics.receiver().getSnapshot()) == null) {
            Thread.sleep(1);
        }
        assertNotNull(snapshot);
        assertEquals(1, snapshot.getValuesForMetric("application_generation").size());
        assertEquals(0, snapshot.getValuesForMetric("application_generation").iterator().next().getValue().getLast());
    }
}
assertEquals(0, snapshot.getValuesForMetric("application_generation").iterator().next().getValue().getLast());
// Checks that the "application_generation" metric shows up in a metrics snapshot
// for a container holding ComponentWithMetrics, with a latest value of 0.
void application_generation_metric() throws Exception {
    try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
            .component(ComponentWithMetrics.class))))) {
        var component = (ComponentWithMetrics) app.getComponentById(ComponentWithMetrics.class.getName());
        assertNotNull(component);

        var metrics = (SimpleMetricConsumer) component.metrics().newInstance();
        assertNotNull(metrics);

        // Wait (bounded, ~10 s) for an asynchronous snapshot to become available.
        Bucket snapshot = null;
        for (int waited = 0; waited < 10000 && snapshot == null; waited++) {
            snapshot = metrics.receiver().getSnapshot();
            if (snapshot == null) Thread.sleep(1);
        }
        assertNotNull(snapshot);
        assertEquals(1, snapshot.getValuesForMetric("application_generation").size());
        assertEquals(0, snapshot.getValuesForMetric("application_generation").iterator().next().getValue().getLast());
    }
}
class ApplicationTest { @Test void minimal_application_can_be_constructed() { try (Application application = Application.fromServicesXml("<container version=\"1.0\"/>", Networking.disable)) { Application unused = application; } } /** Tests that an application with search chains referencing a content cluster can be constructed. */ @Test void container_and_referenced_content() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/withcontent"), Networking.disable)) { Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"), new Query("?query=substring:foobar&timeout=20000")); assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString()); } } @Test void application_with_query_profile_sets_up_query_profile_registry() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/withqueryprofile"), Networking.disable)) { Query query = new Query(HttpRequest.createTestRequest("?query=substring:foobar&timeout=20000", com.yahoo.jdisc.http.HttpRequest.Method.GET), application.getCompiledQueryProfileRegistry().findQueryProfile("default")); Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"), query); assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString()); assertEquals("2", application.getCompiledQueryProfileRegistry().findQueryProfile("default").get("hits")); assertEquals("select * from sources * where weakAnd(substring contains \"foobar\") limit 2 timeout 20000000", result.getQuery().yqlRepresentation(true)); } } private void printTrace(Result result) { for (String message : result.getQuery().getContext(true).getTrace().traceNode().descendants(String.class)) 
System.out.println(message); } @Test void empty_container() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container())))) { try { app.process(new DocumentRemove(null)); fail("expected exception"); } catch (Exception ignore) { } try { app.process(new Processing()); fail("expected exception"); } catch (Exception ignore) { } try { app.search(new Query("?foo")); fail("expected exception"); } catch (Exception ignore) { } } } @Test void config() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .documentProcessor("docproc", "default", MockDocproc.class) .config(new MockApplicationConfig(new MockApplicationConfig.Builder() .mystruct(new MockApplicationConfig.Mystruct.Builder().id("structid").value("structvalue")) .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid1").value("listvalue1")) .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid2").value("listvalue2")) .mylist("item1") .mylist("item2") .mymap("key1", "value1") .mymap("key2", "value2") .mymapstruct("key1", new MockApplicationConfig.Mymapstruct.Builder().id("mapid1").value("mapvalue1")) .mymapstruct("key2", new MockApplicationConfig.Mymapstruct.Builder().id("mapid2").value("mapvalue2"))))))) ) { MockDocproc docproc = (MockDocproc) app.getComponentById("docproc@default"); assertNotNull(docproc); assertEquals(docproc.getConfig().mystruct().id(), "structid"); assertEquals(docproc.getConfig().mystruct().value(), "structvalue"); assertEquals(docproc.getConfig().mystructlist().size(), 2); assertEquals(docproc.getConfig().mystructlist().get(0).id(), "listid1"); assertEquals(docproc.getConfig().mystructlist().get(0).value(), "listvalue1"); assertEquals(docproc.getConfig().mystructlist().get(1).id(), "listid2"); 
assertEquals(docproc.getConfig().mystructlist().get(1).value(), "listvalue2"); assertEquals(docproc.getConfig().mylist().size(), 2); assertEquals(docproc.getConfig().mylist().get(0), "item1"); assertEquals(docproc.getConfig().mylist().get(1), "item2"); assertEquals(docproc.getConfig().mymap().size(), 2); assertTrue(docproc.getConfig().mymap().containsKey("key1")); assertEquals(docproc.getConfig().mymap().get("key1"), "value1"); assertTrue(docproc.getConfig().mymap().containsKey("key2")); assertEquals(docproc.getConfig().mymap().get("key2"), "value2"); assertEquals(docproc.getConfig().mymapstruct().size(), 2); assertTrue(docproc.getConfig().mymapstruct().containsKey("key1")); assertEquals(docproc.getConfig().mymapstruct().get("key1").id(), "mapid1"); assertEquals(docproc.getConfig().mymapstruct().get("key1").value(), "mapvalue1"); assertTrue(docproc.getConfig().mymapstruct().containsKey("key2")); assertEquals(docproc.getConfig().mymapstruct().get("key2").id(), "mapid2"); assertEquals(docproc.getConfig().mymapstruct().get("key2").value(), "mapvalue2"); } } @Test void handler() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .handler("http: ) { RequestHandler handler = app.getRequestHandlerById(MockHttpHandler.class.getName()); assertNotNull(handler); Request request = new Request("http: Response response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); request = new Request("http: response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); request = new Request("http: response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); } } @Test void renderer() throws 
Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .renderer("mock", MockRenderer.class)))) ) { Request request = new Request("http: Response response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "<mock hits=\"0\" />"); } } @Test void search_default() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .searcher(MockSearcher.class)))) ) { Result result = app.search(new Query("?query=foo&timeout=20000")); assertEquals(1, result.hits().size()); } } @Test void search() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .searcher("foo", MockSearcher.class)))) ) { Result result = app.search("foo", new Query("?query=foo&timeout=20000")); assertEquals(1, result.hits().size()); } } @Test void document_type() throws Exception { try ( Application app = Application.fromBuilder(new Application.Builder() .documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8)) .container("default", new Application.Builder.Container() .documentProcessor(MockDocproc.class) .config(new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar")))))) ) { Map<String, DocumentType> typeMap = app.getJDisc("jdisc").documentProcessing().getDocumentTypes(); assertNotNull(typeMap); assertTrue(typeMap.containsKey("test")); } } @Test void get_search_handler() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new 
Application.Builder.Container().search(true))))) { SearchHandler searchHandler = (SearchHandler) app.getRequestHandlerById("com.yahoo.search.handler.SearchHandler"); assertNotNull(searchHandler); } } @Test void component() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component(MockSearcher.class))))) { Component c = app.getComponentById(MockSearcher.class.getName()); assertNotNull(c); } } @Test @Test void component_with_config() throws Exception { MockApplicationConfig config = new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar"))); try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component("foo", MockDocproc.class, config))))) { Component c = app.getComponentById("foo"); assertNotNull(c); } } @Test void file_distribution() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/filedistribution/"), Networking.disable)) { Application unused = application; } } @Test void server() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .server("foo", MockServer.class))) )) { MockServer server = (MockServer) app.getServerById("foo"); assertNotNull(server); assertTrue(server.isStarted()); } } @Test void query_profile() throws Exception { try (Application app = Application.fromBuilder(new Application.Builder() .queryProfile("default", "<query-profile id=\"default\">\n" + "<field name=\"defaultage\">7d</field>\n" + "</query-profile>") .queryProfileType("type", "<query-profile-type id=\"type\">\n" + "<field name=\"defaultage\" type=\"string\" />\n" + "</query-profile-type>") 
.rankExpression("re", "commonfirstphase(globalstaticrank)") .documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8)) .container("default", new Application.Builder.Container() .search(true) ))) { Application unused = app; } } @Test void http_interface_is_off_when_networking_is_disabled() throws Exception { assertThrows(ConnectException.class, () -> { int httpPort = getFreePort(); try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.disable)) { HttpClient client = new org.apache.http.impl.client.DefaultHttpClient(); int statusCode = client.execute(new HttpGet("http: fail("Networking.disable is specified, but the network interface is enabled! Got status code: " + statusCode); Application unused = application; } }); } @Test void http_interface_is_on_when_networking_is_enabled() throws Exception { int httpPort = getFreePort(); try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.enable)) { HttpClient client = new org.apache.http.impl.client.DefaultHttpClient(); HttpResponse response = client.execute(new HttpGet("http: assertEquals(200, response.getStatusLine().getStatusCode()); BufferedReader r = new BufferedReader(new InputStreamReader(response.getEntity().getContent())); String line; StringBuilder sb = new StringBuilder(); while ((line = r.readLine()) != null) { sb.append(line).append("\n"); } assertTrue(sb.toString().contains("Handler")); Application unused = application; } } @Test void athenz_in_deployment_xml() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/athenz-in-deployment-xml/"), Networking.disable)) { Application unused = application; } } private static int getFreePort() throws IOException { try (ServerSocket socket = new ServerSocket(0)) { socket.setReuseAddress(true); return socket.getLocalPort(); } } private static String 
servicesXmlWithServer(int port) { return "<container version='1.0'>" + " <http> <server port='" + port +"' id='foo'/> </http>" + " <accesslog type=\"disabled\" />" + "</container>"; } @Test void application_with_access_control_can_be_constructed() { try (Application application = Application.fromServicesXml(servicesXmlWithAccessControl(), Networking.disable)) { Application unused = application; } } private static String servicesXmlWithAccessControl() { return "<container version='1.0'>" + " <http> <server port='" + 0 +"' id='foo'/> " + " <filtering>" + " <access-control domain='foo' />" + " </filtering>" + " </http>" + " <accesslog type=\"disabled\" />" + "</container>"; } }
class ApplicationTest { @Test void minimal_application_can_be_constructed() { try (Application application = Application.fromServicesXml("<container version=\"1.0\"/>", Networking.disable)) { Application unused = application; } } /** Tests that an application with search chains referencing a content cluster can be constructed. */ @Test void container_and_referenced_content() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/withcontent"), Networking.disable)) { Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"), new Query("?query=substring:foobar&timeout=20000")); assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString()); } } @Test void application_with_query_profile_sets_up_query_profile_registry() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/withqueryprofile"), Networking.disable)) { Query query = new Query(HttpRequest.createTestRequest("?query=substring:foobar&timeout=20000", com.yahoo.jdisc.http.HttpRequest.Method.GET), application.getCompiledQueryProfileRegistry().findQueryProfile("default")); Result result = application.getJDisc("default").search().process(new ComponentSpecification("default"), query); assertEquals("WEAKAND(100) (AND substring:fo substring:oo substring:ob substring:ba substring:ar)", result.hits().get("hasQuery").getQuery().getModel().getQueryTree().toString()); assertEquals("2", application.getCompiledQueryProfileRegistry().findQueryProfile("default").get("hits")); assertEquals("select * from sources * where weakAnd(substring contains \"foobar\") limit 2 timeout 20000000", result.getQuery().yqlRepresentation(true)); } } private void printTrace(Result result) { for (String message : result.getQuery().getContext(true).getTrace().traceNode().descendants(String.class)) 
System.out.println(message); } @Test void empty_container() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container())))) { try { app.process(new DocumentRemove(null)); fail("expected exception"); } catch (Exception ignore) { } try { app.process(new Processing()); fail("expected exception"); } catch (Exception ignore) { } try { app.search(new Query("?foo")); fail("expected exception"); } catch (Exception ignore) { } } } @Test void config() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .documentProcessor("docproc", "default", MockDocproc.class) .config(new MockApplicationConfig(new MockApplicationConfig.Builder() .mystruct(new MockApplicationConfig.Mystruct.Builder().id("structid").value("structvalue")) .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid1").value("listvalue1")) .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid2").value("listvalue2")) .mylist("item1") .mylist("item2") .mymap("key1", "value1") .mymap("key2", "value2") .mymapstruct("key1", new MockApplicationConfig.Mymapstruct.Builder().id("mapid1").value("mapvalue1")) .mymapstruct("key2", new MockApplicationConfig.Mymapstruct.Builder().id("mapid2").value("mapvalue2"))))))) ) { MockDocproc docproc = (MockDocproc) app.getComponentById("docproc@default"); assertNotNull(docproc); assertEquals(docproc.getConfig().mystruct().id(), "structid"); assertEquals(docproc.getConfig().mystruct().value(), "structvalue"); assertEquals(docproc.getConfig().mystructlist().size(), 2); assertEquals(docproc.getConfig().mystructlist().get(0).id(), "listid1"); assertEquals(docproc.getConfig().mystructlist().get(0).value(), "listvalue1"); assertEquals(docproc.getConfig().mystructlist().get(1).id(), "listid2"); 
assertEquals(docproc.getConfig().mystructlist().get(1).value(), "listvalue2"); assertEquals(docproc.getConfig().mylist().size(), 2); assertEquals(docproc.getConfig().mylist().get(0), "item1"); assertEquals(docproc.getConfig().mylist().get(1), "item2"); assertEquals(docproc.getConfig().mymap().size(), 2); assertTrue(docproc.getConfig().mymap().containsKey("key1")); assertEquals(docproc.getConfig().mymap().get("key1"), "value1"); assertTrue(docproc.getConfig().mymap().containsKey("key2")); assertEquals(docproc.getConfig().mymap().get("key2"), "value2"); assertEquals(docproc.getConfig().mymapstruct().size(), 2); assertTrue(docproc.getConfig().mymapstruct().containsKey("key1")); assertEquals(docproc.getConfig().mymapstruct().get("key1").id(), "mapid1"); assertEquals(docproc.getConfig().mymapstruct().get("key1").value(), "mapvalue1"); assertTrue(docproc.getConfig().mymapstruct().containsKey("key2")); assertEquals(docproc.getConfig().mymapstruct().get("key2").id(), "mapid2"); assertEquals(docproc.getConfig().mymapstruct().get("key2").value(), "mapvalue2"); } } @Test void handler() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .handler("http: ) { RequestHandler handler = app.getRequestHandlerById(MockHttpHandler.class.getName()); assertNotNull(handler); Request request = new Request("http: Response response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); request = new Request("http: response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); request = new Request("http: response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "OK"); } } @Test void renderer() throws 
Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .renderer("mock", MockRenderer.class)))) ) { Request request = new Request("http: Response response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "<mock hits=\"0\" />"); } } @Test void search_default() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .searcher(MockSearcher.class)))) ) { Result result = app.search(new Query("?query=foo&timeout=20000")); assertEquals(1, result.hits().size()); } } @Test void search() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .searcher("foo", MockSearcher.class)))) ) { Result result = app.search("foo", new Query("?query=foo&timeout=20000")); assertEquals(1, result.hits().size()); } } @Test void document_type() throws Exception { try ( Application app = Application.fromBuilder(new Application.Builder() .documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8)) .container("default", new Application.Builder.Container() .documentProcessor(MockDocproc.class) .config(new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar")))))) ) { Map<String, DocumentType> typeMap = app.getJDisc("jdisc").documentProcessing().getDocumentTypes(); assertNotNull(typeMap); assertTrue(typeMap.containsKey("test")); } } @Test void get_search_handler() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new 
Application.Builder.Container().search(true))))) { SearchHandler searchHandler = (SearchHandler) app.getRequestHandlerById("com.yahoo.search.handler.SearchHandler"); assertNotNull(searchHandler); } } @Test void component() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component(MockSearcher.class))))) { Component c = app.getComponentById(MockSearcher.class.getName()); assertNotNull(c); } } @Test @Test void component_with_config() throws Exception { MockApplicationConfig config = new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar"))); try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component("foo", MockDocproc.class, config))))) { Component c = app.getComponentById("foo"); assertNotNull(c); } } @Test void file_distribution() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/filedistribution/"), Networking.disable)) { Application unused = application; } } @Test void server() throws Exception { try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .server("foo", MockServer.class))) )) { MockServer server = (MockServer) app.getServerById("foo"); assertNotNull(server); assertTrue(server.isStarted()); } } @Test void query_profile() throws Exception { try (Application app = Application.fromBuilder(new Application.Builder() .queryProfile("default", "<query-profile id=\"default\">\n" + "<field name=\"defaultage\">7d</field>\n" + "</query-profile>") .queryProfileType("type", "<query-profile-type id=\"type\">\n" + "<field name=\"defaultage\" type=\"string\" />\n" + "</query-profile-type>") 
.rankExpression("re", "commonfirstphase(globalstaticrank)") .documentType("test", new String(this.getClass().getResourceAsStream("/test.sd").readAllBytes(), StandardCharsets.UTF_8)) .container("default", new Application.Builder.Container() .search(true) ))) { Application unused = app; } } @Test void http_interface_is_off_when_networking_is_disabled() throws Exception { assertThrows(ConnectException.class, () -> { int httpPort = getFreePort(); try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.disable)) { HttpClient client = new org.apache.http.impl.client.DefaultHttpClient(); int statusCode = client.execute(new HttpGet("http: fail("Networking.disable is specified, but the network interface is enabled! Got status code: " + statusCode); Application unused = application; } }); } @Test void http_interface_is_on_when_networking_is_enabled() throws Exception { int httpPort = getFreePort(); try (Application application = Application.fromServicesXml(servicesXmlWithServer(httpPort), Networking.enable)) { HttpClient client = new org.apache.http.impl.client.DefaultHttpClient(); HttpResponse response = client.execute(new HttpGet("http: assertEquals(200, response.getStatusLine().getStatusCode()); BufferedReader r = new BufferedReader(new InputStreamReader(response.getEntity().getContent())); String line; StringBuilder sb = new StringBuilder(); while ((line = r.readLine()) != null) { sb.append(line).append("\n"); } assertTrue(sb.toString().contains("Handler")); Application unused = application; } } @Test void athenz_in_deployment_xml() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/athenz-in-deployment-xml/"), Networking.disable)) { Application unused = application; } } private static int getFreePort() throws IOException { try (ServerSocket socket = new ServerSocket(0)) { socket.setReuseAddress(true); return socket.getLocalPort(); } } private static String 
servicesXmlWithServer(int port) { return "<container version='1.0'>" + " <http> <server port='" + port +"' id='foo'/> </http>" + " <accesslog type=\"disabled\" />" + "</container>"; } @Test void application_with_access_control_can_be_constructed() { try (Application application = Application.fromServicesXml(servicesXmlWithAccessControl(), Networking.disable)) { Application unused = application; } } private static String servicesXmlWithAccessControl() { return "<container version='1.0'>" + " <http> <server port='" + 0 +"' id='foo'/> " + " <filtering>" + " <access-control domain='foo' />" + " </filtering>" + " </http>" + " <accesslog type=\"disabled\" />" + "</container>"; } }
Do you mean that the tasks should not be run again when the container is restarted? If so, why? I have tried to implement that in the next commit — PTAL.
/**
 * Drives the node towards its wanted state for one tick: syncs generations, then acts on the
 * node-repo state (starting/updating/removing the container, running maintainers, resuming
 * services and the orchestrator as appropriate).
 *
 * FIX: previously the wireguard tasks were skipped for the rest of the tick once they had run
 * against the pre-existing container. If that container was then removed and a new one started
 * within the same tick, the tasks never ran against the new container. The flag is now reset on
 * the start-container path so the tasks run again for a freshly started container.
 */
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);

    if (currentRebootGeneration < node.currentRebootGeneration())
        currentRebootGeneration = node.currentRebootGeneration();

    if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() ||
            currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false))
        currentRestartGeneration = node.currentRestartGeneration();

    // Every time the node spec changes, we should clear the metrics for this container as the dimensions
    // of the metrics may have changed.
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);
        lastNode = node;
    }

    switch (node.state()) {
        case ready:
        case reserved:
        case failed:
        case inactive:
        case parked:
            storageMaintainer.syncLogs(context, true);
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context, Optional.empty());
            stopServicesIfNeeded(context);
            break;
        case active:
            storageMaintainer.syncLogs(context, true);
            storageMaintainer.cleanDiskIfFull(context);
            storageMaintainer.handleCoreDumpsForContainer(context, container, false);

            // Run the wireguard tasks early when a container is already running, so connectivity is
            // converged before the (potentially slow) image download below. If the container is
            // (re)started later in this tick, the tasks must run again for the new container.
            var runOrdinaryWireguardTasks = true;
            if (container.isPresent() && container.get().state().isRunning()) {
                wireguardTasks.forEach(task -> task.converge(context));
                runOrdinaryWireguardTasks = false;
            }

            if (downloadImageIfNeeded(context, container)) {
                context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString());
                return;
            }

            container = removeContainerIfNeededUpdateContainerState(context, container);
            credentialsMaintainers.forEach(maintainer -> maintainer.converge(context));
            if (container.isEmpty()) {
                containerState = STARTING;
                container = Optional.of(startContainer(context));
                containerState = UNKNOWN;
                // A new container was just started: any earlier wireguard run targeted the old one.
                runOrdinaryWireguardTasks = true;
            } else {
                container = Optional.of(updateContainerIfNeeded(context, container.get()));
            }

            aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            if (runOrdinaryWireguardTasks)
                wireguardTasks.forEach(task -> task.converge(context));

            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            if (healthChecker.isPresent()) {
                healthChecker.get().verifyHealth(context);
                if (firstSuccessfulHealthCheckInstant.isEmpty())
                    firstSuccessfulHealthCheckInstant = Optional.of(clock.instant());

                Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context)));
                if (!container.get().resources().equalsCpu(getContainerResources(context)))
                    throw ConvergenceException.ofTransient("Refusing to resume until warm up period ends (" +
                            (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")");
            }
            serviceDumper.processServiceDumpRequest(context);

            updateNodeRepoWithCurrentAttributes(context, container.map(Container::createdAt));
            if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) {
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                suspendedInOrchestrator = false;
            }
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready");
            credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.syncLogs(context, false);
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context, Optional.empty());
            nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
            break;
        default:
            throw ConvergenceException.ofError("UNKNOWN STATE " + node.state().name());
    }
}
// Fallback: run the wireguard tasks now only if they did not already run earlier in this tick
// (they run early when the container was already up, before the image download).
if (! wireguardTasksRun) wireguardTasks.forEach(task -> task.converge(context));
/**
 * Drives the node towards its wanted state for one tick. First syncs the locally tracked
 * reboot/restart generations with the node repo, then dispatches on the node state:
 * inactive-like states stop services and remove the container; {@code active} downloads the
 * wanted image, starts/updates the container, runs maintainers (ACL, credentials, wireguard),
 * health-checks, and resumes services/orchestrator; {@code provisioned}/{@code dirty} move the
 * node back to {@code ready} (dirty additionally archives and clears node storage).
 *
 * Wireguard tasks run early when a container is already up (before the possibly slow image
 * download); {@code runOrdinaryWireguardTasks} is reset to true when a new container is started
 * so the tasks also run against the freshly started container.
 */
void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() || currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.currentRestartGeneration(); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); lastNode = node; } switch (node.state()) { case ready: case reserved: case failed: case inactive: case parked: storageMaintainer.syncLogs(context, true); removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context, Optional.empty()); stopServicesIfNeeded(context); break; case active: storageMaintainer.syncLogs(context, true); storageMaintainer.cleanDiskIfFull(context); storageMaintainer.handleCoreDumpsForContainer(context, container, false); var runOrdinaryWireguardTasks = true; if (container.isPresent() && container.get().state().isRunning()) { wireguardTasks.forEach(task -> task.converge(context)); runOrdinaryWireguardTasks = false; } if (downloadImageIfNeeded(context, container)) { context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainers.forEach(maintainer -> maintainer.converge(context)); if (container.isEmpty()) { containerState = STARTING; container = Optional.of(startContainer(context)); containerState = UNKNOWN; runOrdinaryWireguardTasks = true; } else { container = Optional.of(updateContainerIfNeeded(context, container.get())); } aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); if (runOrdinaryWireguardTasks) wireguardTasks.forEach(task -> 
task.converge(context)); startServicesIfNeeded(context); resumeNodeIfNeeded(context); if (healthChecker.isPresent()) { healthChecker.get().verifyHealth(context); if (firstSuccessfulHealthCheckInstant.isEmpty()) firstSuccessfulHealthCheckInstant = Optional.of(clock.instant()); Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context))); if (!container.get().resources().equalsCpu(getContainerResources(context))) throw ConvergenceException.ofTransient("Refusing to resume until warm up period ends (" + (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")"); } serviceDumper.processServiceDumpRequest(context); updateNodeRepoWithCurrentAttributes(context, container.map(Container::createdAt)); if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) { context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); suspendedInOrchestrator = false; } break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready"); credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.syncLogs(context, false); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context, Optional.empty()); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw ConvergenceException.ofError("UNKNOWN STATE " + node.state().name()); } }
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final ContainerOperations containerOperations; private final RegistryCredentialsProvider registryCredentialsProvider; private final StorageMaintainer storageMaintainer; private final List<CredentialsMaintainer> credentialsMaintainers; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final Clock clock; private final Duration warmUpDuration; private final DoubleFlag containerCpuCap; private final VespaServiceDumper serviceDumper; private final List<ContainerWireguardTask> wireguardTasks; private Thread loopThread; private ContainerState containerState = UNKNOWN; private NodeSpec lastNode; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty(); private boolean suspendedInOrchestrator = false; private int numberOfUnhandledException = 0; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, VespaServiceDumper serviceDumper, List<ContainerWireguardTask> wireguardTasks) { this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider, storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock, DEFAULT_WARM_UP_DURATION, serviceDumper, wireguardTasks); } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration, VespaServiceDumper serviceDumper, List<ContainerWireguardTask> wireguardTasks) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.containerOperations = containerOperations; this.registryCredentialsProvider = registryCredentialsProvider; this.storageMaintainer = storageMaintainer; this.credentialsMaintainers = credentialsMaintainers; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.clock = clock; this.warmUpDuration = warmUpDuration; this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource); this.serviceDumper = serviceDumper; this.wireguardTasks = new ArrayList<>(wireguardTasks); } @Override public void 
start(NodeAgentContext initialContext) { if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent."); loopThread = new Thread(() -> { while (!terminated.get()) { try { converge(contextSupplier.nextContext()); } catch (ContextSupplierInterruptedException ignored) { } } }); loopThread.setName("tick-" + initialContext.hostname()); loopThread.start(); } @Override public void stopForRemoval(NodeAgentContext context) { if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent."); contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); context.log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Invoking vespa-nodectl to start services"); String output = containerOperations.startServices(context); if (!output.isBlank()) { context.log(logger, "Start services output: " + output); } hasStartedServices = true; } } void resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, "Invoking vespa-nodectl to resume services"); String output = containerOperations.resumeNode(context); if (!output.isBlank()) { context.log(logger, "Resume services output: " + output); } hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context, Optional<Instant> containerCreatedAt) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); boolean changed = false; if (context.node().wantedRestartGeneration().isPresent() && !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); changed = true; } boolean createdAtAfterRebootedEvent = 
context.node().events().stream() .filter(event -> event.type().equals("rebooted")) .map(event -> containerCreatedAt .map(createdAt -> createdAt.isAfter(event.at())) .orElse(false)) .findFirst() .orElse(containerCreatedAt.isPresent()); if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration) || createdAtAfterRebootedEvent) { currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); changed = true; } Optional<DockerImage> wantedDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().currentDockerImage(), wantedDockerImage)) { DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = wantedDockerImage.orElse(DockerImage.EMPTY); currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(context.node().currentVespaVersion().orElse(Version.emptyVersion)); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(context.node().wantedVespaVersion().orElse(Version.emptyVersion)); changed = true; } if (changed) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentNodeAttributes, newNodeAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newNodeAttributes); } } private Container startContainer(NodeAgentContext context) { ContainerResources wantedResources = warmUpDuration(context).isNegative() ? 
getContainerResources(context) : getContainerResources(context).withUnlimitedCpus(); ContainerData containerData = containerOperations.createContainer(context, wantedResources); writeContainerData(context, containerData); containerOperations.startContainer(context); currentRebootGeneration = context.node().wantedRebootGeneration(); currentRestartGeneration = context.node().wantedRestartGeneration(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); return containerOperations.getContainer(context).orElseThrow(() -> ConvergenceException.ofError("Did not find container that was just started")); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { List<String> reasons = shouldRemoveContainer(context, existingContainer.get()); if (!reasons.isEmpty()) { removeContainer(context, existingContainer.get(), reasons, false); return Optional.empty(); } shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> { context.log(logger, "Invoking vespa-nodectl to restart services: " + restartReason); orchestratorSuspendNode(context); ContainerResources currentResources = existingContainer.get().resources(); ContainerResources wantedResources = currentResources.withUnlimitedCpus(); if ( ! warmUpDuration(context).isNegative() && ! wantedResources.equals(currentResources)) { context.log(logger, "Updating container resources: %s -> %s", existingContainer.get().resources().toStringCpu(), wantedResources.toStringCpu()); containerOperations.updateContainer(context, existingContainer.get().id(), wantedResources); } String output = containerOperations.restartVespa(context); if ( ! 
output.isBlank()) { context.log(logger, "Restart services output: " + output); } currentRestartGeneration = context.node().wantedRestartGeneration(); firstSuccessfulHealthCheckInstant = Optional.empty(); }); } return existingContainer; } private Optional<String> shouldRestartServices(NodeAgentContext context, Container existingContainer) { NodeSpec node = context.node(); if (!existingContainer.state().isRunning() || node.state() != NodeState.active) return Optional.empty(); if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get()); } return Optional.empty(); } private void stopServicesIfNeeded(NodeAgentContext context) { if (hasStartedServices && context.node().owner().isEmpty()) stopServices(context); } private void stopServices(NodeAgentContext context) { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; hasStartedServices = hasResumedNode = false; firstSuccessfulHealthCheckInstant = Optional.empty(); containerOperations.stopServices(context); } @Override public void stopForHostSuspension(NodeAgentContext context) { getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true)); } public void suspend(NodeAgentContext context) { if (containerState == ABSENT) return; try { hasResumedNode = false; context.log(logger, "Invoking vespa-nodectl to suspend services"); String output = containerOperations.suspendNode(context); if (!output.isBlank()) { context.log(logger, "Suspend services output: " + output); } } catch (RuntimeException e) { context.log(logger, Level.WARNING, "Failed trying to suspend container", e); } } private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().state(); List<String> reasons = new 
ArrayList<>(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) reasons.add("Node in state " + nodeState + ", container should no longer be running"); if (context.node().wantedDockerImage().isPresent() && !context.node().wantedDockerImage().get().equals(existingContainer.image())) { reasons.add("The node is supposed to run a new Docker image: " + existingContainer.image().asString() + " -> " + context.node().wantedDockerImage().get().asString()); } if (!existingContainer.state().isRunning()) reasons.add("Container no longer running"); if (currentRebootGeneration < context.node().wantedRebootGeneration()) { reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d", currentRebootGeneration, context.node().wantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources())) { reasons.add("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources().toStringMemory()); } if (containerState == STARTING) reasons.add("Container failed to start"); return reasons; } private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) { context.log(logger, "Will remove container: " + String.join(", ", reasons)); if (existingContainer.state().isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().state() == NodeState.active) { suspend(context); } stopServices(context); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer), true); containerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new 
containerState is " + containerState); } private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant .map(clock.instant().minus(warmUpDuration(context))::isBefore) .orElse(true)) return existingContainer; if (wantedContainerResources.equalsCpu(existingContainer.resources())) return existingContainer; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources().toStringCpu()); containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources().memoryBytes())); return containerOperations.getContainer(context).orElseThrow(() -> ConvergenceException.ofError("Did not find container that was just updated")); } private ContainerResources getContainerResources(NodeAgentContext context) { double cpuCap = noCpuCap(context.zone()) ? 
0 : context.vcpuOnThisHost() * containerCpuCap .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm)) .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().map(NodeMembership::clusterId)) .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().map(membership -> membership.type().value())) .with(FetchVector.Dimension.HOSTNAME, context.node().hostname()) .value(); return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev; } private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) { NodeSpec node = context.node(); if (node.wantedDockerImage().equals(container.map(c -> c.image()))) return false; RegistryCredentials credentials = registryCredentialsProvider.get(); return node.wantedDockerImage() .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials)) .orElse(false); } public void converge(NodeAgentContext context) { try { doConverge(context); context.log(logger, Level.INFO, "Converged"); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); if (e.isError()) numberOfUnhandledException++; } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state); if (builder.length() > 0) { context.log(logger, Level.INFO, "Changes to node: " + builder); } } private static <T> String fieldDescription(T value) { return value == null ? 
"[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = containerOperations.getContainer(context); if (container.isEmpty()) containerState = ABSENT; return container; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().state() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); suspendedInOrchestrator = true; } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected void writeContainerData(NodeAgentContext context, ContainerData containerData) { } protected List<CredentialsMaintainer> credentialsMaintainers() { return credentialsMaintainers; } private Duration warmUpDuration(NodeAgentContext context) { ZoneApi zone = context.zone(); Optional<NodeMembership> membership = context.node().membership(); return zone.getEnvironment().isTest() || context.nodeType() != NodeType.tenant || membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false) ? 
Duration.ofSeconds(-1) : warmUpDuration.dividedBy(zone.getSystemName().isCd() ? 3 : 1); } }
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final ContainerOperations containerOperations; private final RegistryCredentialsProvider registryCredentialsProvider; private final StorageMaintainer storageMaintainer; private final List<CredentialsMaintainer> credentialsMaintainers; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final Clock clock; private final Duration warmUpDuration; private final DoubleFlag containerCpuCap; private final VespaServiceDumper serviceDumper; private final List<ContainerWireguardTask> wireguardTasks; private Thread loopThread; private ContainerState containerState = UNKNOWN; private NodeSpec lastNode; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty(); private boolean suspendedInOrchestrator = false; private int numberOfUnhandledException = 0; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, VespaServiceDumper serviceDumper, List<ContainerWireguardTask> wireguardTasks) { this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider, storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock, DEFAULT_WARM_UP_DURATION, serviceDumper, wireguardTasks); } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration, VespaServiceDumper serviceDumper, List<ContainerWireguardTask> wireguardTasks) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.containerOperations = containerOperations; this.registryCredentialsProvider = registryCredentialsProvider; this.storageMaintainer = storageMaintainer; this.credentialsMaintainers = credentialsMaintainers; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.clock = clock; this.warmUpDuration = warmUpDuration; this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource); this.serviceDumper = serviceDumper; this.wireguardTasks = new ArrayList<>(wireguardTasks); } @Override public void 
start(NodeAgentContext initialContext) { if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent."); loopThread = new Thread(() -> { while (!terminated.get()) { try { converge(contextSupplier.nextContext()); } catch (ContextSupplierInterruptedException ignored) { } } }); loopThread.setName("tick-" + initialContext.hostname()); loopThread.start(); } @Override public void stopForRemoval(NodeAgentContext context) { if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent."); contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); context.log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Invoking vespa-nodectl to start services"); String output = containerOperations.startServices(context); if (!output.isBlank()) { context.log(logger, "Start services output: " + output); } hasStartedServices = true; } } void resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, "Invoking vespa-nodectl to resume services"); String output = containerOperations.resumeNode(context); if (!output.isBlank()) { context.log(logger, "Resume services output: " + output); } hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context, Optional<Instant> containerCreatedAt) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); boolean changed = false; if (context.node().wantedRestartGeneration().isPresent() && !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); changed = true; } boolean createdAtAfterRebootedEvent = 
context.node().events().stream() .filter(event -> event.type().equals("rebooted")) .map(event -> containerCreatedAt .map(createdAt -> createdAt.isAfter(event.at())) .orElse(false)) .findFirst() .orElse(containerCreatedAt.isPresent()); if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration) || createdAtAfterRebootedEvent) { currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); changed = true; } Optional<DockerImage> wantedDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().currentDockerImage(), wantedDockerImage)) { DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = wantedDockerImage.orElse(DockerImage.EMPTY); currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(context.node().currentVespaVersion().orElse(Version.emptyVersion)); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(context.node().wantedVespaVersion().orElse(Version.emptyVersion)); changed = true; } if (changed) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentNodeAttributes, newNodeAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newNodeAttributes); } } private Container startContainer(NodeAgentContext context) { ContainerResources wantedResources = warmUpDuration(context).isNegative() ? 
getContainerResources(context) : getContainerResources(context).withUnlimitedCpus(); ContainerData containerData = containerOperations.createContainer(context, wantedResources); writeContainerData(context, containerData); containerOperations.startContainer(context); currentRebootGeneration = context.node().wantedRebootGeneration(); currentRestartGeneration = context.node().wantedRestartGeneration(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); return containerOperations.getContainer(context).orElseThrow(() -> ConvergenceException.ofError("Did not find container that was just started")); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { List<String> reasons = shouldRemoveContainer(context, existingContainer.get()); if (!reasons.isEmpty()) { removeContainer(context, existingContainer.get(), reasons, false); return Optional.empty(); } shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> { context.log(logger, "Invoking vespa-nodectl to restart services: " + restartReason); orchestratorSuspendNode(context); ContainerResources currentResources = existingContainer.get().resources(); ContainerResources wantedResources = currentResources.withUnlimitedCpus(); if ( ! warmUpDuration(context).isNegative() && ! wantedResources.equals(currentResources)) { context.log(logger, "Updating container resources: %s -> %s", existingContainer.get().resources().toStringCpu(), wantedResources.toStringCpu()); containerOperations.updateContainer(context, existingContainer.get().id(), wantedResources); } String output = containerOperations.restartVespa(context); if ( ! 
output.isBlank()) { context.log(logger, "Restart services output: " + output); } currentRestartGeneration = context.node().wantedRestartGeneration(); firstSuccessfulHealthCheckInstant = Optional.empty(); }); } return existingContainer; } private Optional<String> shouldRestartServices(NodeAgentContext context, Container existingContainer) { NodeSpec node = context.node(); if (!existingContainer.state().isRunning() || node.state() != NodeState.active) return Optional.empty(); if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get()); } return Optional.empty(); } private void stopServicesIfNeeded(NodeAgentContext context) { if (hasStartedServices && context.node().owner().isEmpty()) stopServices(context); } private void stopServices(NodeAgentContext context) { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; hasStartedServices = hasResumedNode = false; firstSuccessfulHealthCheckInstant = Optional.empty(); containerOperations.stopServices(context); } @Override public void stopForHostSuspension(NodeAgentContext context) { getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true)); } public void suspend(NodeAgentContext context) { if (containerState == ABSENT) return; try { hasResumedNode = false; context.log(logger, "Invoking vespa-nodectl to suspend services"); String output = containerOperations.suspendNode(context); if (!output.isBlank()) { context.log(logger, "Suspend services output: " + output); } } catch (RuntimeException e) { context.log(logger, Level.WARNING, "Failed trying to suspend container", e); } } private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().state(); List<String> reasons = new 
ArrayList<>(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) reasons.add("Node in state " + nodeState + ", container should no longer be running"); if (context.node().wantedDockerImage().isPresent() && !context.node().wantedDockerImage().get().equals(existingContainer.image())) { reasons.add("The node is supposed to run a new Docker image: " + existingContainer.image().asString() + " -> " + context.node().wantedDockerImage().get().asString()); } if (!existingContainer.state().isRunning()) reasons.add("Container no longer running"); if (currentRebootGeneration < context.node().wantedRebootGeneration()) { reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d", currentRebootGeneration, context.node().wantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources())) { reasons.add("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources().toStringMemory()); } if (containerState == STARTING) reasons.add("Container failed to start"); return reasons; } private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) { context.log(logger, "Will remove container: " + String.join(", ", reasons)); if (existingContainer.state().isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().state() == NodeState.active) { suspend(context); } stopServices(context); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer), true); containerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new 
containerState is " + containerState); } private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant .map(clock.instant().minus(warmUpDuration(context))::isBefore) .orElse(true)) return existingContainer; if (wantedContainerResources.equalsCpu(existingContainer.resources())) return existingContainer; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources().toStringCpu()); containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources().memoryBytes())); return containerOperations.getContainer(context).orElseThrow(() -> ConvergenceException.ofError("Did not find container that was just updated")); } private ContainerResources getContainerResources(NodeAgentContext context) { double cpuCap = noCpuCap(context.zone()) ? 
0 : context.vcpuOnThisHost() * containerCpuCap .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm)) .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().map(NodeMembership::clusterId)) .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().map(membership -> membership.type().value())) .with(FetchVector.Dimension.HOSTNAME, context.node().hostname()) .value(); return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev; } private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) { NodeSpec node = context.node(); if (node.wantedDockerImage().equals(container.map(c -> c.image()))) return false; RegistryCredentials credentials = registryCredentialsProvider.get(); return node.wantedDockerImage() .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials)) .orElse(false); } public void converge(NodeAgentContext context) { try { doConverge(context); context.log(logger, Level.INFO, "Converged"); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); if (e.isError()) numberOfUnhandledException++; } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state); if (builder.length() > 0) { context.log(logger, Level.INFO, "Changes to node: " + builder); } } private static <T> String fieldDescription(T value) { return value == null ? 
"[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = containerOperations.getContainer(context); if (container.isEmpty()) containerState = ABSENT; return container; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().state() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); suspendedInOrchestrator = true; } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected void writeContainerData(NodeAgentContext context, ContainerData containerData) { } protected List<CredentialsMaintainer> credentialsMaintainers() { return credentialsMaintainers; } private Duration warmUpDuration(NodeAgentContext context) { ZoneApi zone = context.zone(); Optional<NodeMembership> membership = context.node().membership(); return zone.getEnvironment().isTest() || context.nodeType() != NodeType.tenant || membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false) ? 
Duration.ofSeconds(-1) : warmUpDuration.dividedBy(zone.getSystemName().isCd() ? 3 : 1); } }
Consider collecting into a `Set` — e.g. `.collect(Collectors.toSet())` — instead of `.toList()`: the result is only used for `contains` membership checks, and a set also de-duplicates schema names when several rank profiles of the same schema declare a global phase.
/**
 * Returns the names of all schemas that contain at least one rank profile with a
 * global-phase ranking expression.
 *
 * Collected into a Set rather than a List: callers only use the result for
 * membership checks (see ContainerSearch.initializeDispatchers), and a set also
 * removes duplicate schema names when several rank profiles of the same schema
 * declare a global phase.
 */
private static Collection<String> getSchemasWithGlobalPhase(DeployState state) {
    return state.rankProfileRegistry().all().stream()
            .filter(rp -> rp.getGlobalPhase() != null)
            .map(rp -> rp.schema().getName())
            .collect(Collectors.toSet());
}
.filter(rp -> rp.getGlobalPhase() != null).map(rp -> rp.schema().getName()).toList();
/** Collects the names of every schema that has a rank profile using global-phase ranking. */
private static Collection<String> getSchemasWithGlobalPhase(DeployState state) {
    var profilesWithGlobalPhase = state.rankProfileRegistry().all().stream()
            .filter(profile -> profile.getGlobalPhase() != null);
    return profilesWithGlobalPhase
            .map(profile -> profile.schema().getName())
            .collect(Collectors.toSet());
}
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer, SchemaInfoConfig.Producer { public static final String QUERY_PROFILE_REGISTRY_CLASS = CompiledQueryProfileRegistry.class.getName(); private final ApplicationContainerCluster owningCluster; private final List<SearchCluster> searchClusters = new LinkedList<>(); private final Collection<String> schemasWithGlobalPhase; private final boolean globalPhase; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; public ContainerSearch(DeployState deployState, ApplicationContainerCluster cluster, SearchChains chains) { super(chains); this.globalPhase = deployState.featureFlags().enableGlobalPhase(); this.schemasWithGlobalPhase = getSchemasWithGlobalPhase(deployState); this.owningCluster = cluster; owningCluster.addComponent(Component.fromClassAndBundle(CompiledQueryProfileRegistry.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(com.yahoo.search.schema.SchemaInfo.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(SearchStatusExtension.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(RankProfilesEvaluatorFactory.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(com.yahoo.search.ranking.GlobalPhaseRanker.class, SEARCH_AND_DOCPROC_BUNDLE)); cluster.addSearchAndDocprocBundles(); } public void connectSearchClusters(Map<String, SearchCluster> searchClusters) { this.searchClusters.addAll(searchClusters.values()); initializeDispatchers(searchClusters.values()); initializeSearchChains(searchClusters); } /** Adds a Dispatcher component to the owning container cluster for each search cluster */ private void 
initializeDispatchers(Collection<SearchCluster> searchClusters) { for (SearchCluster searchCluster : searchClusters) { if (searchCluster instanceof IndexedSearchCluster indexed) { var dispatcher = new DispatcherComponent(indexed); owningCluster.addComponent(dispatcher); } if (globalPhase) { for (var documentDb : searchCluster.getDocumentDbs()) { if (!schemasWithGlobalPhase.contains(documentDb.getSchemaName())) continue; var factory = new RankProfilesEvaluatorComponent(documentDb); if (! owningCluster.getComponentsMap().containsKey(factory.getComponentId())) { owningCluster.addComponent(factory); } } } } } public void initializeSearchChains(Map<String, ? extends SearchCluster> searchClusters) { getChains().initialize(searchClusters); } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles != null) { queryProfiles.getConfig(builder); } } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules != null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates != null) pageTemplates.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(SchemaInfoConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < searchClusters.size(); i++) { SearchCluster sys = 
findClusterWithId(searchClusters, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (SchemaInfo spec : sys.schemas().values()) { scB.searchdef(spec.fullSchema().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); scB.globalphase(globalPhase); if ( ! (sys instanceof IndexedSearchCluster)) { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static SearchCluster findClusterWithId(List<SearchCluster> clusters, int index) { for (SearchCluster sys : clusters) { if (sys.getClusterIndex() == index) return sys; } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer, SchemaInfoConfig.Producer { public static final String QUERY_PROFILE_REGISTRY_CLASS = CompiledQueryProfileRegistry.class.getName(); private final ApplicationContainerCluster owningCluster; private final List<SearchCluster> searchClusters = new LinkedList<>(); private final Collection<String> schemasWithGlobalPhase; private final boolean globalPhase; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; public ContainerSearch(DeployState deployState, ApplicationContainerCluster cluster, SearchChains chains) { super(chains); this.globalPhase = deployState.featureFlags().enableGlobalPhase(); this.schemasWithGlobalPhase = getSchemasWithGlobalPhase(deployState); this.owningCluster = cluster; owningCluster.addComponent(Component.fromClassAndBundle(CompiledQueryProfileRegistry.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(com.yahoo.search.schema.SchemaInfo.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(SearchStatusExtension.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(RankProfilesEvaluatorFactory.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(com.yahoo.search.ranking.GlobalPhaseRanker.class, SEARCH_AND_DOCPROC_BUNDLE)); cluster.addSearchAndDocprocBundles(); } public void connectSearchClusters(Map<String, SearchCluster> searchClusters) { this.searchClusters.addAll(searchClusters.values()); initializeDispatchers(searchClusters.values()); initializeSearchChains(searchClusters); } /** Adds a Dispatcher component to the owning container cluster for each search cluster */ private void 
initializeDispatchers(Collection<SearchCluster> searchClusters) { for (SearchCluster searchCluster : searchClusters) { if (searchCluster instanceof IndexedSearchCluster indexed) { var dispatcher = new DispatcherComponent(indexed); owningCluster.addComponent(dispatcher); } if (globalPhase) { for (var documentDb : searchCluster.getDocumentDbs()) { if (!schemasWithGlobalPhase.contains(documentDb.getSchemaName())) continue; var factory = new RankProfilesEvaluatorComponent(documentDb); if (! owningCluster.getComponentsMap().containsKey(factory.getComponentId())) { owningCluster.addComponent(factory); } } } } } public void initializeSearchChains(Map<String, ? extends SearchCluster> searchClusters) { getChains().initialize(searchClusters); } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles != null) { queryProfiles.getConfig(builder); } } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules != null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates != null) pageTemplates.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(SchemaInfoConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < searchClusters.size(); i++) { SearchCluster sys = 
findClusterWithId(searchClusters, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (SchemaInfo spec : sys.schemas().values()) { scB.searchdef(spec.fullSchema().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); scB.globalphase(globalPhase); if ( ! (sys instanceof IndexedSearchCluster)) { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static SearchCluster findClusterWithId(List<SearchCluster> clusters, int index) { for (SearchCluster sys : clusters) { if (sys.getClusterIndex() == index) return sys; } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } }
Addressed in [4b55e59](https://github.com/vespa-engine/vespa/commit/4b55e59b0bdda6559a40addf0ede434ab955dc07)
private static Collection<String> getSchemasWithGlobalPhase(DeployState state) { return state.rankProfileRegistry().all().stream() .filter(rp -> rp.getGlobalPhase() != null).map(rp -> rp.schema().getName()).toList(); }
.filter(rp -> rp.getGlobalPhase() != null).map(rp -> rp.schema().getName()).toList();
private static Collection<String> getSchemasWithGlobalPhase(DeployState state) { return state.rankProfileRegistry().all().stream() .filter(rp -> rp.getGlobalPhase() != null).map(rp -> rp.schema().getName()).collect(Collectors.toSet()); }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer, SchemaInfoConfig.Producer { public static final String QUERY_PROFILE_REGISTRY_CLASS = CompiledQueryProfileRegistry.class.getName(); private final ApplicationContainerCluster owningCluster; private final List<SearchCluster> searchClusters = new LinkedList<>(); private final Collection<String> schemasWithGlobalPhase; private final boolean globalPhase; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; public ContainerSearch(DeployState deployState, ApplicationContainerCluster cluster, SearchChains chains) { super(chains); this.globalPhase = deployState.featureFlags().enableGlobalPhase(); this.schemasWithGlobalPhase = getSchemasWithGlobalPhase(deployState); this.owningCluster = cluster; owningCluster.addComponent(Component.fromClassAndBundle(CompiledQueryProfileRegistry.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(com.yahoo.search.schema.SchemaInfo.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(SearchStatusExtension.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(RankProfilesEvaluatorFactory.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(com.yahoo.search.ranking.GlobalPhaseRanker.class, SEARCH_AND_DOCPROC_BUNDLE)); cluster.addSearchAndDocprocBundles(); } public void connectSearchClusters(Map<String, SearchCluster> searchClusters) { this.searchClusters.addAll(searchClusters.values()); initializeDispatchers(searchClusters.values()); initializeSearchChains(searchClusters); } /** Adds a Dispatcher component to the owning container cluster for each search cluster */ private void 
initializeDispatchers(Collection<SearchCluster> searchClusters) { for (SearchCluster searchCluster : searchClusters) { if (searchCluster instanceof IndexedSearchCluster indexed) { var dispatcher = new DispatcherComponent(indexed); owningCluster.addComponent(dispatcher); } if (globalPhase) { for (var documentDb : searchCluster.getDocumentDbs()) { if (!schemasWithGlobalPhase.contains(documentDb.getSchemaName())) continue; var factory = new RankProfilesEvaluatorComponent(documentDb); if (! owningCluster.getComponentsMap().containsKey(factory.getComponentId())) { owningCluster.addComponent(factory); } } } } } public void initializeSearchChains(Map<String, ? extends SearchCluster> searchClusters) { getChains().initialize(searchClusters); } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles != null) { queryProfiles.getConfig(builder); } } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules != null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates != null) pageTemplates.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(SchemaInfoConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < searchClusters.size(); i++) { SearchCluster sys = 
findClusterWithId(searchClusters, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (SchemaInfo spec : sys.schemas().values()) { scB.searchdef(spec.fullSchema().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); scB.globalphase(globalPhase); if ( ! (sys instanceof IndexedSearchCluster)) { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static SearchCluster findClusterWithId(List<SearchCluster> clusters, int index) { for (SearchCluster sys : clusters) { if (sys.getClusterIndex() == index) return sys; } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer, SchemaInfoConfig.Producer { public static final String QUERY_PROFILE_REGISTRY_CLASS = CompiledQueryProfileRegistry.class.getName(); private final ApplicationContainerCluster owningCluster; private final List<SearchCluster> searchClusters = new LinkedList<>(); private final Collection<String> schemasWithGlobalPhase; private final boolean globalPhase; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; public ContainerSearch(DeployState deployState, ApplicationContainerCluster cluster, SearchChains chains) { super(chains); this.globalPhase = deployState.featureFlags().enableGlobalPhase(); this.schemasWithGlobalPhase = getSchemasWithGlobalPhase(deployState); this.owningCluster = cluster; owningCluster.addComponent(Component.fromClassAndBundle(CompiledQueryProfileRegistry.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(com.yahoo.search.schema.SchemaInfo.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(SearchStatusExtension.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(RankProfilesEvaluatorFactory.class, SEARCH_AND_DOCPROC_BUNDLE)); owningCluster.addComponent(Component.fromClassAndBundle(com.yahoo.search.ranking.GlobalPhaseRanker.class, SEARCH_AND_DOCPROC_BUNDLE)); cluster.addSearchAndDocprocBundles(); } public void connectSearchClusters(Map<String, SearchCluster> searchClusters) { this.searchClusters.addAll(searchClusters.values()); initializeDispatchers(searchClusters.values()); initializeSearchChains(searchClusters); } /** Adds a Dispatcher component to the owning container cluster for each search cluster */ private void 
initializeDispatchers(Collection<SearchCluster> searchClusters) { for (SearchCluster searchCluster : searchClusters) { if (searchCluster instanceof IndexedSearchCluster indexed) { var dispatcher = new DispatcherComponent(indexed); owningCluster.addComponent(dispatcher); } if (globalPhase) { for (var documentDb : searchCluster.getDocumentDbs()) { if (!schemasWithGlobalPhase.contains(documentDb.getSchemaName())) continue; var factory = new RankProfilesEvaluatorComponent(documentDb); if (! owningCluster.getComponentsMap().containsKey(factory.getComponentId())) { owningCluster.addComponent(factory); } } } } } public void initializeSearchChains(Map<String, ? extends SearchCluster> searchClusters) { getChains().initialize(searchClusters); } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles != null) { queryProfiles.getConfig(builder); } } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules != null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates != null) pageTemplates.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(SchemaInfoConfig.Builder builder) { for (SearchCluster sc : searchClusters) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < searchClusters.size(); i++) { SearchCluster sys = 
findClusterWithId(searchClusters, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (SchemaInfo spec : sys.schemas().values()) { scB.searchdef(spec.fullSchema().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); scB.globalphase(globalPhase); if ( ! (sys instanceof IndexedSearchCluster)) { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static SearchCluster findClusterWithId(List<SearchCluster> clusters, int index) { for (SearchCluster sys : clusters) { if (sys.getClusterIndex() == index) return sys; } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } }
When can this happen?
static Optional<Reference> parseOnnxInput(String input) { var optRef = Reference.simple(input); if (optRef.isPresent()) { return optRef; } try { var ref = Reference.fromIdentifier(input); return Optional.of(ref); } catch (Exception e) { } return Optional.empty(); }
static Optional<Reference> parseOnnxInput(String input) { var optRef = Reference.simple(input); if (optRef.isPresent()) { return optRef; } try { var ref = Reference.fromIdentifier(input); return Optional.of(ref); } catch (Exception e) { } return Optional.empty(); }
class OnnxExpressionNode extends CompositeNode { private final OnnxModel model; private final String onnxOutputName; private final TensorType expectedType; private final String outputAs; private final List<String> modelInputs = new ArrayList<>(); private final List<ExpressionNode> inputRefs = new ArrayList<>(); OnnxExpressionNode(OnnxModel model, String onnxOutputName, TensorType expectedType, String outputAs) { this.model = model; this.onnxOutputName = onnxOutputName; this.expectedType = expectedType; this.outputAs = outputAs; for (var input : model.inputSpecs) { modelInputs.add(input.onnxName); var optRef = parseOnnxInput(input.source); if (optRef.isEmpty()) { throw new IllegalArgumentException("Bad input source for ONNX model " + model.name() + ": '" + input + "'"); } var ref = optRef.get(); inputRefs.add(new ReferenceNode(ref)); } } @Override public List<ExpressionNode> children() { return List.copyOf(inputRefs); } @Override public CompositeNode setChildren(List<ExpressionNode> children) { if (inputRefs.size() != children.size()) { throw new IllegalArgumentException("bad setChildren"); } inputRefs.clear(); inputRefs.addAll(children); return this; } @Override public Value evaluate(Context context) { Map<String, Tensor> inputs = new HashMap<>(); for (int i = 0; i < modelInputs.size(); i++) { Value inputValue = inputRefs.get(i).evaluate(context); inputs.put(modelInputs.get(i), inputValue.asTensor()); } return new TensorValue(model.evaluate(inputs, onnxOutputName)); } @Override public TensorType type(TypeContext<Reference> context) { return expectedType; } @Override public int hashCode() { return Objects.hash("OnnxExpressionNode", model.name(), onnxOutputName); } @Override public StringBuilder toString(StringBuilder b, SerializationContext context, Deque<String> path, CompositeNode parent) { b.append("onnx_expression_node(").append(model.name()).append(")"); if (outputAs != null && ! outputAs.equals("")) { b.append(".").append(outputAs); } return b; } }
class OnnxExpressionNode extends CompositeNode { private final OnnxModel model; private final String onnxOutputName; private final TensorType expectedType; private final String outputAs; private final List<String> modelInputs = new ArrayList<>(); private final List<ExpressionNode> inputRefs = new ArrayList<>(); OnnxExpressionNode(OnnxModel model, String onnxOutputName, TensorType expectedType, String outputAs) { this.model = model; this.onnxOutputName = onnxOutputName; this.expectedType = expectedType; this.outputAs = outputAs; for (var input : model.inputSpecs) { modelInputs.add(input.onnxName); var optRef = parseOnnxInput(input.source); if (optRef.isEmpty()) { throw new IllegalArgumentException("Bad input source for ONNX model " + model.name() + ": '" + input + "'"); } var ref = optRef.get(); inputRefs.add(new ReferenceNode(ref)); } } @Override public List<ExpressionNode> children() { return List.copyOf(inputRefs); } @Override public CompositeNode setChildren(List<ExpressionNode> children) { if (inputRefs.size() != children.size()) { throw new IllegalArgumentException("bad setChildren"); } inputRefs.clear(); inputRefs.addAll(children); return this; } @Override public Value evaluate(Context context) { Map<String, Tensor> inputs = new HashMap<>(); for (int i = 0; i < modelInputs.size(); i++) { Value inputValue = inputRefs.get(i).evaluate(context); inputs.put(modelInputs.get(i), inputValue.asTensor()); } return new TensorValue(model.evaluate(inputs, onnxOutputName)); } @Override public TensorType type(TypeContext<Reference> context) { return expectedType; } @Override public int hashCode() { return Objects.hash("OnnxExpressionNode", model.name(), onnxOutputName); } @Override public StringBuilder toString(StringBuilder b, SerializationContext context, Deque<String> path, CompositeNode parent) { b.append("onnx_expression_node(").append(model.name()).append(")"); if (outputAs != null && ! outputAs.equals("")) { b.append(".").append(outputAs); } return b; } }
> When can this happen? only on unexpected configuration; it's there only to make it possible to give a better error message just above.
static Optional<Reference> parseOnnxInput(String input) { var optRef = Reference.simple(input); if (optRef.isPresent()) { return optRef; } try { var ref = Reference.fromIdentifier(input); return Optional.of(ref); } catch (Exception e) { } return Optional.empty(); }
static Optional<Reference> parseOnnxInput(String input) { var optRef = Reference.simple(input); if (optRef.isPresent()) { return optRef; } try { var ref = Reference.fromIdentifier(input); return Optional.of(ref); } catch (Exception e) { } return Optional.empty(); }
class OnnxExpressionNode extends CompositeNode { private final OnnxModel model; private final String onnxOutputName; private final TensorType expectedType; private final String outputAs; private final List<String> modelInputs = new ArrayList<>(); private final List<ExpressionNode> inputRefs = new ArrayList<>(); OnnxExpressionNode(OnnxModel model, String onnxOutputName, TensorType expectedType, String outputAs) { this.model = model; this.onnxOutputName = onnxOutputName; this.expectedType = expectedType; this.outputAs = outputAs; for (var input : model.inputSpecs) { modelInputs.add(input.onnxName); var optRef = parseOnnxInput(input.source); if (optRef.isEmpty()) { throw new IllegalArgumentException("Bad input source for ONNX model " + model.name() + ": '" + input + "'"); } var ref = optRef.get(); inputRefs.add(new ReferenceNode(ref)); } } @Override public List<ExpressionNode> children() { return List.copyOf(inputRefs); } @Override public CompositeNode setChildren(List<ExpressionNode> children) { if (inputRefs.size() != children.size()) { throw new IllegalArgumentException("bad setChildren"); } inputRefs.clear(); inputRefs.addAll(children); return this; } @Override public Value evaluate(Context context) { Map<String, Tensor> inputs = new HashMap<>(); for (int i = 0; i < modelInputs.size(); i++) { Value inputValue = inputRefs.get(i).evaluate(context); inputs.put(modelInputs.get(i), inputValue.asTensor()); } return new TensorValue(model.evaluate(inputs, onnxOutputName)); } @Override public TensorType type(TypeContext<Reference> context) { return expectedType; } @Override public int hashCode() { return Objects.hash("OnnxExpressionNode", model.name(), onnxOutputName); } @Override public StringBuilder toString(StringBuilder b, SerializationContext context, Deque<String> path, CompositeNode parent) { b.append("onnx_expression_node(").append(model.name()).append(")"); if (outputAs != null && ! outputAs.equals("")) { b.append(".").append(outputAs); } return b; } }
class OnnxExpressionNode extends CompositeNode { private final OnnxModel model; private final String onnxOutputName; private final TensorType expectedType; private final String outputAs; private final List<String> modelInputs = new ArrayList<>(); private final List<ExpressionNode> inputRefs = new ArrayList<>(); OnnxExpressionNode(OnnxModel model, String onnxOutputName, TensorType expectedType, String outputAs) { this.model = model; this.onnxOutputName = onnxOutputName; this.expectedType = expectedType; this.outputAs = outputAs; for (var input : model.inputSpecs) { modelInputs.add(input.onnxName); var optRef = parseOnnxInput(input.source); if (optRef.isEmpty()) { throw new IllegalArgumentException("Bad input source for ONNX model " + model.name() + ": '" + input + "'"); } var ref = optRef.get(); inputRefs.add(new ReferenceNode(ref)); } } @Override public List<ExpressionNode> children() { return List.copyOf(inputRefs); } @Override public CompositeNode setChildren(List<ExpressionNode> children) { if (inputRefs.size() != children.size()) { throw new IllegalArgumentException("bad setChildren"); } inputRefs.clear(); inputRefs.addAll(children); return this; } @Override public Value evaluate(Context context) { Map<String, Tensor> inputs = new HashMap<>(); for (int i = 0; i < modelInputs.size(); i++) { Value inputValue = inputRefs.get(i).evaluate(context); inputs.put(modelInputs.get(i), inputValue.asTensor()); } return new TensorValue(model.evaluate(inputs, onnxOutputName)); } @Override public TensorType type(TypeContext<Reference> context) { return expectedType; } @Override public int hashCode() { return Objects.hash("OnnxExpressionNode", model.name(), onnxOutputName); } @Override public StringBuilder toString(StringBuilder b, SerializationContext context, Deque<String> path, CompositeNode parent) { b.append("onnx_expression_node(").append(model.name()).append(")"); if (outputAs != null && ! outputAs.equals("")) { b.append(".").append(outputAs); } return b; } }
Do we really want the controller to die over this?
private static NodeResources addResources(NodeResources a, NodeResources b) { assert a.architecture() == b.architecture() || a.architecture() == NodeResources.Architecture.any || b.architecture() == NodeResources.Architecture.any; return new NodeResources( a.vcpu() + b.vcpu(), a.memoryGb() + b.memoryGb(), a.diskGb() + b.diskGb(), 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any, a.architecture(), a.gpuResources().plus(b.gpuResources())); }
assert a.architecture() == b.architecture() || a.architecture() == NodeResources.Architecture.any || b.architecture() == NodeResources.Architecture.any;
private static NodeResources addResources(NodeResources a, NodeResources b) { if (a.architecture() != b.architecture() && a.architecture() != NodeResources.Architecture.any && b.architecture() != NodeResources.Architecture.any) { throw new IllegalArgumentException(a + " and " + b + " are not interchangeable for resource snapshots"); } return new NodeResources( a.vcpu() + b.vcpu(), a.memoryGb() + b.memoryGb(), a.diskGb() + b.diskGb(), 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any, a.architecture(), a.gpuResources().plus(b.gpuResources())); }
class ResourceSnapshot { private final ApplicationId applicationId; private final NodeResources resources; private final Instant timestamp; private final ZoneId zoneId; private final Version version; public ResourceSnapshot(ApplicationId applicationId, NodeResources resources, Instant timestamp, ZoneId zoneId, Version version) { this.applicationId = applicationId; this.resources = resources; this.timestamp = timestamp; this.zoneId = zoneId; this.version = version; } public static ResourceSnapshot from(ApplicationId applicationId, int nodes, NodeResources resources, Instant timestamp, ZoneId zoneId) { return new ResourceSnapshot(applicationId, resources.multipliedBy(nodes), timestamp, zoneId, Version.emptyVersion); } public static ResourceSnapshot from(List<Node> nodes, Instant timestamp, ZoneId zoneId) { Set<ApplicationId> applicationIds = nodes.stream() .filter(node -> node.owner().isPresent()) .map(node -> node.owner().get()) .collect(Collectors.toSet()); Set<Version> versions = nodes.stream() .map(Node::currentVersion) .collect(Collectors.toSet()); if (applicationIds.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one application"); if (versions.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one version"); var resources = nodes.stream() .map(Node::resources) .reduce(NodeResources.zero(), ResourceSnapshot::addResources); return new ResourceSnapshot(applicationIds.iterator().next(), resources, timestamp, zoneId, versions.iterator().next()); } public ApplicationId getApplicationId() { return applicationId; } public NodeResources resources() { return resources; } public Instant getTimestamp() { return timestamp; } public ZoneId getZoneId() { return zoneId; } public Version getVersion() { return version; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof ResourceSnapshot)) return false; ResourceSnapshot other = (ResourceSnapshot) o; return 
this.applicationId.equals(other.applicationId) && this.resources.equals(other.resources) && this.timestamp.equals(other.timestamp) && this.zoneId.equals(other.zoneId) && this.version.equals(other.version); } @Override public int hashCode(){ return Objects.hash(applicationId, resources, timestamp, zoneId, version); } /* This function does pretty much the same thing as NodeResources::add, but it allows adding resources * where some dimensions that are not relevant for billing (yet). * * TODO: Make this code respect all dimensions. */ }
class ResourceSnapshot { private final ApplicationId applicationId; private final NodeResources resources; private final Instant timestamp; private final ZoneId zoneId; private final Version version; public ResourceSnapshot(ApplicationId applicationId, NodeResources resources, Instant timestamp, ZoneId zoneId, Version version) { this.applicationId = applicationId; this.resources = resources; this.timestamp = timestamp; this.zoneId = zoneId; this.version = version; } public static ResourceSnapshot from(ApplicationId applicationId, int nodes, NodeResources resources, Instant timestamp, ZoneId zoneId) { return new ResourceSnapshot(applicationId, resources.multipliedBy(nodes), timestamp, zoneId, Version.emptyVersion); } public static ResourceSnapshot from(List<Node> nodes, Instant timestamp, ZoneId zoneId) { Set<ApplicationId> applicationIds = nodes.stream() .filter(node -> node.owner().isPresent()) .map(node -> node.owner().get()) .collect(Collectors.toSet()); Set<Version> versions = nodes.stream() .map(Node::currentVersion) .collect(Collectors.toSet()); if (applicationIds.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one application"); if (versions.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one version"); var resources = nodes.stream() .map(Node::resources) .reduce(NodeResources.zero(), ResourceSnapshot::addResources); return new ResourceSnapshot(applicationIds.iterator().next(), resources, timestamp, zoneId, versions.iterator().next()); } public ApplicationId getApplicationId() { return applicationId; } public NodeResources resources() { return resources; } public Instant getTimestamp() { return timestamp; } public ZoneId getZoneId() { return zoneId; } public Version getVersion() { return version; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof ResourceSnapshot)) return false; ResourceSnapshot other = (ResourceSnapshot) o; return 
this.applicationId.equals(other.applicationId) && this.resources.equals(other.resources) && this.timestamp.equals(other.timestamp) && this.zoneId.equals(other.zoneId) && this.version.equals(other.version); } @Override public int hashCode(){ return Objects.hash(applicationId, resources, timestamp, zoneId, version); } /* This function does pretty much the same thing as NodeResources::add, but it allows adding resources * where some dimensions that are not relevant for billing (yet) are not the same. * * TODO: Make this code respect all dimensions. */ }
Good point. Will change to regular exception.
private static NodeResources addResources(NodeResources a, NodeResources b) { assert a.architecture() == b.architecture() || a.architecture() == NodeResources.Architecture.any || b.architecture() == NodeResources.Architecture.any; return new NodeResources( a.vcpu() + b.vcpu(), a.memoryGb() + b.memoryGb(), a.diskGb() + b.diskGb(), 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any, a.architecture(), a.gpuResources().plus(b.gpuResources())); }
assert a.architecture() == b.architecture() || a.architecture() == NodeResources.Architecture.any || b.architecture() == NodeResources.Architecture.any;
private static NodeResources addResources(NodeResources a, NodeResources b) { if (a.architecture() != b.architecture() && a.architecture() != NodeResources.Architecture.any && b.architecture() != NodeResources.Architecture.any) { throw new IllegalArgumentException(a + " and " + b + " are not interchangeable for resource snapshots"); } return new NodeResources( a.vcpu() + b.vcpu(), a.memoryGb() + b.memoryGb(), a.diskGb() + b.diskGb(), 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any, a.architecture(), a.gpuResources().plus(b.gpuResources())); }
class ResourceSnapshot { private final ApplicationId applicationId; private final NodeResources resources; private final Instant timestamp; private final ZoneId zoneId; private final Version version; public ResourceSnapshot(ApplicationId applicationId, NodeResources resources, Instant timestamp, ZoneId zoneId, Version version) { this.applicationId = applicationId; this.resources = resources; this.timestamp = timestamp; this.zoneId = zoneId; this.version = version; } public static ResourceSnapshot from(ApplicationId applicationId, int nodes, NodeResources resources, Instant timestamp, ZoneId zoneId) { return new ResourceSnapshot(applicationId, resources.multipliedBy(nodes), timestamp, zoneId, Version.emptyVersion); } public static ResourceSnapshot from(List<Node> nodes, Instant timestamp, ZoneId zoneId) { Set<ApplicationId> applicationIds = nodes.stream() .filter(node -> node.owner().isPresent()) .map(node -> node.owner().get()) .collect(Collectors.toSet()); Set<Version> versions = nodes.stream() .map(Node::currentVersion) .collect(Collectors.toSet()); if (applicationIds.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one application"); if (versions.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one version"); var resources = nodes.stream() .map(Node::resources) .reduce(NodeResources.zero(), ResourceSnapshot::addResources); return new ResourceSnapshot(applicationIds.iterator().next(), resources, timestamp, zoneId, versions.iterator().next()); } public ApplicationId getApplicationId() { return applicationId; } public NodeResources resources() { return resources; } public Instant getTimestamp() { return timestamp; } public ZoneId getZoneId() { return zoneId; } public Version getVersion() { return version; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof ResourceSnapshot)) return false; ResourceSnapshot other = (ResourceSnapshot) o; return 
this.applicationId.equals(other.applicationId) && this.resources.equals(other.resources) && this.timestamp.equals(other.timestamp) && this.zoneId.equals(other.zoneId) && this.version.equals(other.version); } @Override public int hashCode(){ return Objects.hash(applicationId, resources, timestamp, zoneId, version); } /* This function does pretty much the same thing as NodeResources::add, but it allows adding resources * where some dimensions that are not relevant for billing (yet). * * TODO: Make this code respect all dimensions. */ }
class ResourceSnapshot { private final ApplicationId applicationId; private final NodeResources resources; private final Instant timestamp; private final ZoneId zoneId; private final Version version; public ResourceSnapshot(ApplicationId applicationId, NodeResources resources, Instant timestamp, ZoneId zoneId, Version version) { this.applicationId = applicationId; this.resources = resources; this.timestamp = timestamp; this.zoneId = zoneId; this.version = version; } public static ResourceSnapshot from(ApplicationId applicationId, int nodes, NodeResources resources, Instant timestamp, ZoneId zoneId) { return new ResourceSnapshot(applicationId, resources.multipliedBy(nodes), timestamp, zoneId, Version.emptyVersion); } public static ResourceSnapshot from(List<Node> nodes, Instant timestamp, ZoneId zoneId) { Set<ApplicationId> applicationIds = nodes.stream() .filter(node -> node.owner().isPresent()) .map(node -> node.owner().get()) .collect(Collectors.toSet()); Set<Version> versions = nodes.stream() .map(Node::currentVersion) .collect(Collectors.toSet()); if (applicationIds.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one application"); if (versions.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one version"); var resources = nodes.stream() .map(Node::resources) .reduce(NodeResources.zero(), ResourceSnapshot::addResources); return new ResourceSnapshot(applicationIds.iterator().next(), resources, timestamp, zoneId, versions.iterator().next()); } public ApplicationId getApplicationId() { return applicationId; } public NodeResources resources() { return resources; } public Instant getTimestamp() { return timestamp; } public ZoneId getZoneId() { return zoneId; } public Version getVersion() { return version; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof ResourceSnapshot)) return false; ResourceSnapshot other = (ResourceSnapshot) o; return 
this.applicationId.equals(other.applicationId) && this.resources.equals(other.resources) && this.timestamp.equals(other.timestamp) && this.zoneId.equals(other.zoneId) && this.version.equals(other.version); } @Override public int hashCode(){ return Objects.hash(applicationId, resources, timestamp, zoneId, version); } /* This function does pretty much the same thing as NodeResources::add, but it allows adding resources * where some dimensions that are not relevant for billing (yet) are not the same. * * TODO: Make this code respect all dimensions. */ }
I think this is fine, but why not do it simply by doing the deploy attempt outside the try-with block in autoscale?
protected double maintain() { if ( ! nodeRepository().nodes().isWorking()) return 0.0; if (nodeRepository().zone().environment().isTest()) return 1.0; int attempts = 0; int failures = 0; for (var applicationNodes : activeNodesByApplication().entrySet()) { for (var clusterNodes : nodesByCluster(applicationNodes.getValue()).entrySet()) { attempts++; Result result = autoscale(applicationNodes.getKey(), clusterNodes.getKey()); switch (result) { case lockFailure -> failures++; case resourceChange -> redeploy(applicationNodes.getKey()); } } } return asSuccessFactor(attempts, failures); }
Result result = autoscale(applicationNodes.getKey(), clusterNodes.getKey());
protected double maintain() { if ( ! nodeRepository().nodes().isWorking()) return 0.0; if (nodeRepository().zone().environment().isTest()) return 1.0; int attempts = 0; int failures = 0; for (var applicationNodes : activeNodesByApplication().entrySet()) { for (var clusterNodes : nodesByCluster(applicationNodes.getValue()).entrySet()) { attempts++; if ( ! autoscale(applicationNodes.getKey(), clusterNodes.getKey())) failures++; } } return asSuccessFactor(attempts, failures); }
class AutoscalingMaintainer extends NodeRepositoryMaintainer { private final Autoscaler autoscaler; private final Deployer deployer; private final Metric metric; public AutoscalingMaintainer(NodeRepository nodeRepository, Deployer deployer, Metric metric, Duration interval) { super(nodeRepository, interval, metric); this.autoscaler = new Autoscaler(nodeRepository); this.deployer = deployer; this.metric = metric; } @Override private void redeploy(ApplicationId application) { MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository()); if (deployment.isValid()) deployment.activate(); } /** * Autoscales the given cluster. * * @return result of autoscaling attempt */ private Result autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId) { try (var lock = nodeRepository().applications().lock(applicationId)) { Optional<Application> application = nodeRepository().applications().get(applicationId); if (application.isEmpty()) return Result.noop; if (application.get().cluster(clusterId).isEmpty()) return Result.noop; Cluster cluster = application.get().cluster(clusterId).get(); NodeList clusterNodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId); cluster = updateCompletion(cluster, clusterNodes); var current = new AllocatableClusterResources(clusterNodes.not().retired(), nodeRepository()).advertisedResources(); Autoscaling autoscaling = null; if (cluster.target().resources().isEmpty() || current.equals(cluster.target().resources().get())) { autoscaling = autoscaler.autoscale(application.get(), cluster, clusterNodes); if ( autoscaling.isPresent() || cluster.target().isEmpty()) cluster = cluster.withTarget(autoscaling); } applications().put(application.get().with(cluster), lock); if (autoscaling != null && autoscaling.resources().isPresent() && !current.equals(autoscaling.resources().get())) { logAutoscaling(current, autoscaling.resources().get(), applicationId, 
clusterNodes.not().retired()); return Result.resourceChange; } return Result.noop; } catch (ApplicationLockException e) { return Result.lockFailure; } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Illegal arguments for " + applicationId + " cluster " + clusterId, e); } } private Applications applications() { return nodeRepository().applications(); } /** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */ private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) { if (cluster.lastScalingEvent().isEmpty()) return cluster; var event = cluster.lastScalingEvent().get(); if (event.completion().isPresent()) return cluster; if (clusterNodes.retired().stream() .anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at()))) return cluster; for (var nodeTimeseries : nodeRepository().metricsDb().getNodeTimeseries(Duration.between(event.at(), clock().instant()), clusterNodes)) { Optional<NodeMetricSnapshot> onNewGeneration = nodeTimeseries.asList().stream() .filter(snapshot -> snapshot.generation() >= event.generation()).findAny(); if (onNewGeneration.isEmpty()) return cluster; } Instant completionTime = nodeRepository().clock().instant(); return cluster.with(event.withCompletion(completionTime)); } private void logAutoscaling(ClusterResources from, ClusterResources to, ApplicationId application, NodeList clusterNodes) { log.info("Autoscaling " + application + " " + clusterNodes.clusterSpec() + ":" + "\nfrom " + toString(from) + "\nto " + toString(to)); } static String toString(ClusterResources r) { return r + " (total: " + r.totalResources() + ")"; } private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) { return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id()); } private enum Result { /** No change needed */ noop, /** Failed to acquire lock */ lockFailure, /** Autoscaling changed resources */ 
resourceChange; } }
class AutoscalingMaintainer extends NodeRepositoryMaintainer { private final Autoscaler autoscaler; private final Deployer deployer; private final Metric metric; public AutoscalingMaintainer(NodeRepository nodeRepository, Deployer deployer, Metric metric, Duration interval) { super(nodeRepository, interval, metric); this.autoscaler = new Autoscaler(nodeRepository); this.deployer = deployer; this.metric = metric; } @Override /** * Autoscales the given cluster. * * @return true if an autoscaling decision was made or nothing should be done, false if there was an error */ private boolean autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId) { boolean redeploy = false; try (var lock = nodeRepository().applications().lock(applicationId)) { Optional<Application> application = nodeRepository().applications().get(applicationId); if (application.isEmpty()) return true; if (application.get().cluster(clusterId).isEmpty()) return true; Cluster cluster = application.get().cluster(clusterId).get(); NodeList clusterNodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId); cluster = updateCompletion(cluster, clusterNodes); var current = new AllocatableClusterResources(clusterNodes.not().retired(), nodeRepository()).advertisedResources(); Autoscaling autoscaling = null; if (cluster.target().resources().isEmpty() || current.equals(cluster.target().resources().get())) { autoscaling = autoscaler.autoscale(application.get(), cluster, clusterNodes); if ( autoscaling.isPresent() || cluster.target().isEmpty()) cluster = cluster.withTarget(autoscaling); } applications().put(application.get().with(cluster), lock); if (autoscaling != null && autoscaling.resources().isPresent() && !current.equals(autoscaling.resources().get())) { redeploy = true; logAutoscaling(current, autoscaling.resources().get(), applicationId, clusterNodes.not().retired()); } } catch (ApplicationLockException e) { return false; } catch (IllegalArgumentException e) { throw 
new IllegalArgumentException("Illegal arguments for " + applicationId + " cluster " + clusterId, e); } if (redeploy) { try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) { if (deployment.isValid()) deployment.activate(); } } return true; } private Applications applications() { return nodeRepository().applications(); } /** Check if the last scaling event for this cluster has completed and if so record it in the returned instance */ private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) { if (cluster.lastScalingEvent().isEmpty()) return cluster; var event = cluster.lastScalingEvent().get(); if (event.completion().isPresent()) return cluster; if (clusterNodes.retired().stream() .anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at()))) return cluster; for (var nodeTimeseries : nodeRepository().metricsDb().getNodeTimeseries(Duration.between(event.at(), clock().instant()), clusterNodes)) { Optional<NodeMetricSnapshot> onNewGeneration = nodeTimeseries.asList().stream() .filter(snapshot -> snapshot.generation() >= event.generation()).findAny(); if (onNewGeneration.isEmpty()) return cluster; } Instant completionTime = nodeRepository().clock().instant(); return cluster.with(event.withCompletion(completionTime)); } private void logAutoscaling(ClusterResources from, ClusterResources to, ApplicationId application, NodeList clusterNodes) { log.info("Autoscaling " + application + " " + clusterNodes.clusterSpec() + ":" + "\nfrom " + toString(from) + "\nto " + toString(to)); } static String toString(ClusterResources r) { return r + " (total: " + r.totalResources() + ")"; } private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) { return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id()); } }
requireNonNull here?
public Builder cloudAccount(CloudAccount cloudAccount) { this.cloudAccount = cloudAccount; return this; }
this.cloudAccount = cloudAccount;
public Builder cloudAccount(CloudAccount cloudAccount) { this.cloudAccount = cloudAccount; return this; }
class Builder { private String hostname; private String id; private NodeState state; private NodeType type; private CloudAccount cloudAccount = CloudAccount.empty; private String flavor; private Optional<DockerImage> wantedDockerImage = Optional.empty(); private Optional<DockerImage> currentDockerImage = Optional.empty(); private Optional<Version> wantedVespaVersion = Optional.empty(); private Optional<Version> currentVespaVersion = Optional.empty(); private Optional<Version> wantedOsVersion = Optional.empty(); private Optional<Version> currentOsVersion = Optional.empty(); private OrchestratorStatus orchestratorStatus = OrchestratorStatus.NO_REMARKS; private Optional<ApplicationId> owner = Optional.empty(); private Optional<NodeMembership> membership = Optional.empty(); private Optional<Long> wantedRestartGeneration = Optional.empty(); private Optional<Long> currentRestartGeneration = Optional.empty(); private long wantedRebootGeneration; private long currentRebootGeneration; private Optional<Instant> wantedFirmwareCheck = Optional.empty(); private Optional<Instant> currentFirmwareCheck = Optional.empty(); private Optional<String> modelName = Optional.empty(); private NodeResources resources; private NodeResources realResources; private Set<String> ipAddresses = Set.of(); private Set<String> additionalIpAddresses = Set.of(); private NodeReports reports = new NodeReports(); private List<Event> events = List.of(); private Optional<String> parentHostname = Optional.empty(); private Optional<URI> archiveUri = Optional.empty(); private Optional<ApplicationId> exclusiveTo = Optional.empty(); private List<TrustStoreItem> trustStore = List.of(); private Optional<WireguardKey> wireguardPubkey = Optional.empty(); private boolean wantToRebuild = false; public Builder() {} public Builder(NodeSpec node) { hostname(node.hostname); id(node.id); state(node.state); type(node.type); flavor(node.flavor); resources(node.resources); realResources(node.realResources); 
ipAddresses(node.ipAddresses); additionalIpAddresses(node.additionalIpAddresses); wantedRebootGeneration(node.wantedRebootGeneration); currentRebootGeneration(node.currentRebootGeneration); orchestratorStatus(node.orchestratorStatus); reports(new NodeReports(node.reports)); events(node.events); node.wantedDockerImage.ifPresent(this::wantedDockerImage); node.currentDockerImage.ifPresent(this::currentDockerImage); node.wantedVespaVersion.ifPresent(this::wantedVespaVersion); node.currentVespaVersion.ifPresent(this::currentVespaVersion); node.wantedOsVersion.ifPresent(this::wantedOsVersion); node.currentOsVersion.ifPresent(this::currentOsVersion); node.owner.ifPresent(this::owner); node.membership.ifPresent(this::membership); node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration); node.currentRestartGeneration.ifPresent(this::currentRestartGeneration); node.wantedFirmwareCheck.ifPresent(this::wantedFirmwareCheck); node.currentFirmwareCheck.ifPresent(this::currentFirmwareCheck); node.parentHostname.ifPresent(this::parentHostname); node.archiveUri.ifPresent(this::archiveUri); node.exclusiveTo.ifPresent(this::exclusiveTo); trustStore(node.trustStore); node.wireguardPubkey.ifPresent(this::wireguardPubkey); wantToRebuild(node.wantToRebuild); } public Builder hostname(String hostname) { this.hostname = hostname; return this; } public Builder id(String id) { this.id = id; return this; } public Builder wantedDockerImage(DockerImage wantedDockerImage) { this.wantedDockerImage = Optional.of(wantedDockerImage); return this; } public Builder currentDockerImage(DockerImage currentDockerImage) { this.currentDockerImage = Optional.of(currentDockerImage); return this; } public Builder state(NodeState state) { this.state = state; return this; } public Builder type(NodeType nodeType) { this.type = nodeType; return this; } public Builder flavor(String flavor) { this.flavor = flavor; return this; } public Builder wantedVespaVersion(Version wantedVespaVersion) { 
this.wantedVespaVersion = Optional.of(wantedVespaVersion); return this; } public Builder currentVespaVersion(Version vespaVersion) { this.currentVespaVersion = Optional.of(vespaVersion); return this; } public Builder wantedOsVersion(Version wantedOsVersion) { this.wantedOsVersion = Optional.of(wantedOsVersion); return this; } public Builder currentOsVersion(Version currentOsVersion) { this.currentOsVersion = Optional.of(currentOsVersion); return this; } public Builder orchestratorStatus(OrchestratorStatus orchestratorStatus) { this.orchestratorStatus = orchestratorStatus; return this; } public Builder owner(ApplicationId owner) { this.owner = Optional.of(owner); return this; } public Builder membership(NodeMembership membership) { this.membership = Optional.of(membership); return this; } public Builder wantedRestartGeneration(long wantedRestartGeneration) { this.wantedRestartGeneration = Optional.of(wantedRestartGeneration); return this; } public Builder currentRestartGeneration(long currentRestartGeneration) { this.currentRestartGeneration = Optional.of(currentRestartGeneration); return this; } public Builder wantedRebootGeneration(long wantedRebootGeneration) { this.wantedRebootGeneration = wantedRebootGeneration; return this; } public Builder currentRebootGeneration(long currentRebootGeneration) { this.currentRebootGeneration = currentRebootGeneration; return this; } public Builder wantedFirmwareCheck(Instant wantedFirmwareCheck) { this.wantedFirmwareCheck = Optional.of(wantedFirmwareCheck); return this; } public Builder currentFirmwareCheck(Instant currentFirmwareCheck) { this.currentFirmwareCheck = Optional.of(currentFirmwareCheck); return this; } public Builder resources(NodeResources resources) { this.resources = resources; return this; } public Builder realResources(NodeResources realResources) { this.realResources = realResources; return this; } public Builder vcpu(double vcpu) { return realResources(realResources.withVcpu(vcpu)); } public Builder 
memoryGb(double memoryGb) { return realResources(realResources.withMemoryGb(memoryGb)); } public Builder diskGb(double diskGb) { return realResources(realResources.withDiskGb(diskGb)); } public Builder fastDisk(boolean fastDisk) { return realResources(realResources.with(fastDisk ? fast : slow)); } public Builder bandwidthGbps(double bandwidthGbps) { return realResources(realResources.withBandwidthGbps(bandwidthGbps)); } public Builder ipAddresses(Set<String> ipAddresses) { this.ipAddresses = ipAddresses; return this; } public Builder additionalIpAddresses(Set<String> additionalIpAddresses) { this.additionalIpAddresses = additionalIpAddresses; return this; } public Builder reports(NodeReports reports) { this.reports = reports; return this; } public Builder report(String reportId, JsonNode report) { this.reports.setReport(reportId, report); return this; } public Builder removeReport(String reportId) { reports.removeReport(reportId); return this; } public Builder events(List<Event> events) { this.events = events; return this; } public Builder parentHostname(String parentHostname) { this.parentHostname = Optional.of(parentHostname); return this; } public Builder archiveUri(URI archiveUri) { this.archiveUri = Optional.of(archiveUri); return this; } public Builder exclusiveTo(ApplicationId applicationId) { this.exclusiveTo = Optional.of(applicationId); return this; } public Builder trustStore(List<TrustStoreItem> trustStore) { this.trustStore = List.copyOf(trustStore); return this; } public Builder wireguardPubkey(WireguardKey wireguardKey) { wireguardPubkey = Optional.of(wireguardKey); return this; } public Builder wantToRebuild(boolean wantToRebuild) { this.wantToRebuild = wantToRebuild; return this; } public Builder updateFromNodeAttributes(NodeAttributes attributes) { attributes.getHostId().ifPresent(this::id); attributes.getDockerImage().ifPresent(this::currentDockerImage); attributes.getCurrentOsVersion().ifPresent(this::currentOsVersion); 
attributes.getRebootGeneration().ifPresent(this::currentRebootGeneration); attributes.getRestartGeneration().ifPresent(this::currentRestartGeneration); trustStore(attributes.getTrustStore()); attributes.getWireguardPubkey().ifPresent(this::wireguardPubkey); this.reports.updateFromRawMap(attributes.getReports()); return this; } public String hostname() { return hostname; } public Optional<DockerImage> wantedDockerImage() { return wantedDockerImage; } public Optional<DockerImage> currentDockerImage() { return currentDockerImage; } public NodeState state() { return state; } public NodeType type() { return type; } public CloudAccount cloudAccount() { return cloudAccount; } public String flavor() { return flavor; } public Optional<Version> wantedVespaVersion() { return wantedVespaVersion; } public Optional<Version> currentVespaVersion() { return currentVespaVersion; } public Optional<Version> wantedOsVersion() { return wantedOsVersion; } public Optional<Version> currentOsVersion() { return currentOsVersion; } public OrchestratorStatus orchestratorStatus() { return orchestratorStatus; } public Optional<ApplicationId> owner() { return owner; } public Optional<NodeMembership> membership() { return membership; } public Optional<Long> wantedRestartGeneration() { return wantedRestartGeneration; } public Optional<Long> currentRestartGeneration() { return currentRestartGeneration; } public long wantedRebootGeneration() { return wantedRebootGeneration; } public long currentRebootGeneration() { return currentRebootGeneration; } public NodeResources resources() { return resources; } public NodeResources realResources() { return realResources; } public Set<String> ipAddresses() { return ipAddresses; } public Set<String> additionalIpAddresses() { return additionalIpAddresses; } public NodeReports reports() { return reports; } public List<Event> events() { return events; } public Optional<String> parentHostname() { return parentHostname; } public Optional<URI> archiveUri() { return 
archiveUri; } public NodeSpec build() { return new NodeSpec(hostname, id, wantedDockerImage, currentDockerImage, state, type, cloudAccount, flavor, wantedVespaVersion, currentVespaVersion, wantedOsVersion, currentOsVersion, orchestratorStatus, owner, membership, wantedRestartGeneration, currentRestartGeneration, wantedRebootGeneration, currentRebootGeneration, wantedFirmwareCheck, currentFirmwareCheck, modelName, resources, realResources, ipAddresses, additionalIpAddresses, reports, events, parentHostname, archiveUri, exclusiveTo, trustStore, wireguardPubkey, wantToRebuild); } public static Builder testSpec(String hostname) { return testSpec(hostname, NodeState.active); } /** * Creates a NodeSpec.Builder that has the given hostname, in a given state, and some * reasonable values for the remaining required NodeSpec fields. */ public static Builder testSpec(String hostname, NodeState state) { Builder builder = new Builder() .id(hostname) .hostname(hostname) .state(state) .type(NodeType.tenant) .flavor("d-2-8-50") .resources(new NodeResources(2, 8, 50, 10)) .realResources(new NodeResources(2, 8, 50, 10)) .events(List.of(new Event("operator", "rebooted", Instant.EPOCH))); if (EnumSet.of(NodeState.active, NodeState.inactive, NodeState.reserved).contains(state)) { builder .owner(ApplicationId.defaultId()) .membership(new NodeMembership("container", "my-id", "group", 0, false)) .wantedVespaVersion(Version.fromString("7.1.1")) .wantedDockerImage(DockerImage.fromString("docker.domain.tld/repo/image:7.1.1")) .currentRestartGeneration(0) .wantedRestartGeneration(0); } return builder; } }
class Builder { private String hostname; private String id; private NodeState state; private NodeType type; private CloudAccount cloudAccount = CloudAccount.empty; private String flavor; private Optional<DockerImage> wantedDockerImage = Optional.empty(); private Optional<DockerImage> currentDockerImage = Optional.empty(); private Optional<Version> wantedVespaVersion = Optional.empty(); private Optional<Version> currentVespaVersion = Optional.empty(); private Optional<Version> wantedOsVersion = Optional.empty(); private Optional<Version> currentOsVersion = Optional.empty(); private OrchestratorStatus orchestratorStatus = OrchestratorStatus.NO_REMARKS; private Optional<ApplicationId> owner = Optional.empty(); private Optional<NodeMembership> membership = Optional.empty(); private Optional<Long> wantedRestartGeneration = Optional.empty(); private Optional<Long> currentRestartGeneration = Optional.empty(); private long wantedRebootGeneration; private long currentRebootGeneration; private Optional<Instant> wantedFirmwareCheck = Optional.empty(); private Optional<Instant> currentFirmwareCheck = Optional.empty(); private Optional<String> modelName = Optional.empty(); private NodeResources resources; private NodeResources realResources; private Set<String> ipAddresses = Set.of(); private Set<String> additionalIpAddresses = Set.of(); private NodeReports reports = new NodeReports(); private List<Event> events = List.of(); private Optional<String> parentHostname = Optional.empty(); private Optional<URI> archiveUri = Optional.empty(); private Optional<ApplicationId> exclusiveTo = Optional.empty(); private List<TrustStoreItem> trustStore = List.of(); private Optional<WireguardKey> wireguardPubkey = Optional.empty(); private boolean wantToRebuild = false; public Builder() {} public Builder(NodeSpec node) { hostname(node.hostname); id(node.id); state(node.state); type(node.type); flavor(node.flavor); resources(node.resources); realResources(node.realResources); 
ipAddresses(node.ipAddresses); additionalIpAddresses(node.additionalIpAddresses); wantedRebootGeneration(node.wantedRebootGeneration); currentRebootGeneration(node.currentRebootGeneration); orchestratorStatus(node.orchestratorStatus); reports(new NodeReports(node.reports)); events(node.events); node.wantedDockerImage.ifPresent(this::wantedDockerImage); node.currentDockerImage.ifPresent(this::currentDockerImage); node.wantedVespaVersion.ifPresent(this::wantedVespaVersion); node.currentVespaVersion.ifPresent(this::currentVespaVersion); node.wantedOsVersion.ifPresent(this::wantedOsVersion); node.currentOsVersion.ifPresent(this::currentOsVersion); node.owner.ifPresent(this::owner); node.membership.ifPresent(this::membership); node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration); node.currentRestartGeneration.ifPresent(this::currentRestartGeneration); node.wantedFirmwareCheck.ifPresent(this::wantedFirmwareCheck); node.currentFirmwareCheck.ifPresent(this::currentFirmwareCheck); node.parentHostname.ifPresent(this::parentHostname); node.archiveUri.ifPresent(this::archiveUri); node.exclusiveTo.ifPresent(this::exclusiveTo); trustStore(node.trustStore); node.wireguardPubkey.ifPresent(this::wireguardPubkey); wantToRebuild(node.wantToRebuild); } public Builder hostname(String hostname) { this.hostname = hostname; return this; } public Builder id(String id) { this.id = id; return this; } public Builder wantedDockerImage(DockerImage wantedDockerImage) { this.wantedDockerImage = Optional.of(wantedDockerImage); return this; } public Builder currentDockerImage(DockerImage currentDockerImage) { this.currentDockerImage = Optional.of(currentDockerImage); return this; } public Builder state(NodeState state) { this.state = state; return this; } public Builder type(NodeType nodeType) { this.type = nodeType; return this; } public Builder flavor(String flavor) { this.flavor = flavor; return this; } public Builder wantedVespaVersion(Version wantedVespaVersion) { 
this.wantedVespaVersion = Optional.of(wantedVespaVersion); return this; } public Builder currentVespaVersion(Version vespaVersion) { this.currentVespaVersion = Optional.of(vespaVersion); return this; } public Builder wantedOsVersion(Version wantedOsVersion) { this.wantedOsVersion = Optional.of(wantedOsVersion); return this; } public Builder currentOsVersion(Version currentOsVersion) { this.currentOsVersion = Optional.of(currentOsVersion); return this; } public Builder orchestratorStatus(OrchestratorStatus orchestratorStatus) { this.orchestratorStatus = orchestratorStatus; return this; } public Builder owner(ApplicationId owner) { this.owner = Optional.of(owner); return this; } public Builder membership(NodeMembership membership) { this.membership = Optional.of(membership); return this; } public Builder wantedRestartGeneration(long wantedRestartGeneration) { this.wantedRestartGeneration = Optional.of(wantedRestartGeneration); return this; } public Builder currentRestartGeneration(long currentRestartGeneration) { this.currentRestartGeneration = Optional.of(currentRestartGeneration); return this; } public Builder wantedRebootGeneration(long wantedRebootGeneration) { this.wantedRebootGeneration = wantedRebootGeneration; return this; } public Builder currentRebootGeneration(long currentRebootGeneration) { this.currentRebootGeneration = currentRebootGeneration; return this; } public Builder wantedFirmwareCheck(Instant wantedFirmwareCheck) { this.wantedFirmwareCheck = Optional.of(wantedFirmwareCheck); return this; } public Builder currentFirmwareCheck(Instant currentFirmwareCheck) { this.currentFirmwareCheck = Optional.of(currentFirmwareCheck); return this; } public Builder resources(NodeResources resources) { this.resources = resources; return this; } public Builder realResources(NodeResources realResources) { this.realResources = realResources; return this; } public Builder vcpu(double vcpu) { return realResources(realResources.withVcpu(vcpu)); } public Builder 
memoryGb(double memoryGb) { return realResources(realResources.withMemoryGb(memoryGb)); } public Builder diskGb(double diskGb) { return realResources(realResources.withDiskGb(diskGb)); } public Builder fastDisk(boolean fastDisk) { return realResources(realResources.with(fastDisk ? fast : slow)); } public Builder bandwidthGbps(double bandwidthGbps) { return realResources(realResources.withBandwidthGbps(bandwidthGbps)); } public Builder ipAddresses(Set<String> ipAddresses) { this.ipAddresses = ipAddresses; return this; } public Builder additionalIpAddresses(Set<String> additionalIpAddresses) { this.additionalIpAddresses = additionalIpAddresses; return this; } public Builder reports(NodeReports reports) { this.reports = reports; return this; } public Builder report(String reportId, JsonNode report) { this.reports.setReport(reportId, report); return this; } public Builder removeReport(String reportId) { reports.removeReport(reportId); return this; } public Builder events(List<Event> events) { this.events = events; return this; } public Builder parentHostname(String parentHostname) { this.parentHostname = Optional.of(parentHostname); return this; } public Builder archiveUri(URI archiveUri) { this.archiveUri = Optional.of(archiveUri); return this; } public Builder exclusiveTo(ApplicationId applicationId) { this.exclusiveTo = Optional.of(applicationId); return this; } public Builder trustStore(List<TrustStoreItem> trustStore) { this.trustStore = List.copyOf(trustStore); return this; } public Builder wireguardPubkey(WireguardKey wireguardKey) { wireguardPubkey = Optional.of(wireguardKey); return this; } public Builder wantToRebuild(boolean wantToRebuild) { this.wantToRebuild = wantToRebuild; return this; } public Builder updateFromNodeAttributes(NodeAttributes attributes) { attributes.getHostId().ifPresent(this::id); attributes.getDockerImage().ifPresent(this::currentDockerImage); attributes.getCurrentOsVersion().ifPresent(this::currentOsVersion); 
attributes.getRebootGeneration().ifPresent(this::currentRebootGeneration); attributes.getRestartGeneration().ifPresent(this::currentRestartGeneration); trustStore(attributes.getTrustStore()); attributes.getWireguardPubkey().ifPresent(this::wireguardPubkey); this.reports.updateFromRawMap(attributes.getReports()); return this; } public String hostname() { return hostname; } public Optional<DockerImage> wantedDockerImage() { return wantedDockerImage; } public Optional<DockerImage> currentDockerImage() { return currentDockerImage; } public NodeState state() { return state; } public NodeType type() { return type; } public CloudAccount cloudAccount() { return cloudAccount; } public String flavor() { return flavor; } public Optional<Version> wantedVespaVersion() { return wantedVespaVersion; } public Optional<Version> currentVespaVersion() { return currentVespaVersion; } public Optional<Version> wantedOsVersion() { return wantedOsVersion; } public Optional<Version> currentOsVersion() { return currentOsVersion; } public OrchestratorStatus orchestratorStatus() { return orchestratorStatus; } public Optional<ApplicationId> owner() { return owner; } public Optional<NodeMembership> membership() { return membership; } public Optional<Long> wantedRestartGeneration() { return wantedRestartGeneration; } public Optional<Long> currentRestartGeneration() { return currentRestartGeneration; } public long wantedRebootGeneration() { return wantedRebootGeneration; } public long currentRebootGeneration() { return currentRebootGeneration; } public NodeResources resources() { return resources; } public NodeResources realResources() { return realResources; } public Set<String> ipAddresses() { return ipAddresses; } public Set<String> additionalIpAddresses() { return additionalIpAddresses; } public NodeReports reports() { return reports; } public List<Event> events() { return events; } public Optional<String> parentHostname() { return parentHostname; } public Optional<URI> archiveUri() { return 
archiveUri; } public NodeSpec build() { return new NodeSpec(hostname, id, wantedDockerImage, currentDockerImage, state, type, cloudAccount, flavor, wantedVespaVersion, currentVespaVersion, wantedOsVersion, currentOsVersion, orchestratorStatus, owner, membership, wantedRestartGeneration, currentRestartGeneration, wantedRebootGeneration, currentRebootGeneration, wantedFirmwareCheck, currentFirmwareCheck, modelName, resources, realResources, ipAddresses, additionalIpAddresses, reports, events, parentHostname, archiveUri, exclusiveTo, trustStore, wireguardPubkey, wantToRebuild); } public static Builder testSpec(String hostname) { return testSpec(hostname, NodeState.active); } /** * Creates a NodeSpec.Builder that has the given hostname, in a given state, and some * reasonable values for the remaining required NodeSpec fields. */ public static Builder testSpec(String hostname, NodeState state) { Builder builder = new Builder() .id(hostname) .hostname(hostname) .state(state) .type(NodeType.tenant) .flavor("d-2-8-50") .resources(new NodeResources(2, 8, 50, 10)) .realResources(new NodeResources(2, 8, 50, 10)) .events(List.of(new Event("operator", "rebooted", Instant.EPOCH))); if (EnumSet.of(NodeState.active, NodeState.inactive, NodeState.reserved).contains(state)) { builder .owner(ApplicationId.defaultId()) .membership(new NodeMembership("container", "my-id", "group", 0, false)) .wantedVespaVersion(Version.fromString("7.1.1")) .wantedDockerImage(DockerImage.fromString("docker.domain.tld/repo/image:7.1.1")) .currentRestartGeneration(0) .wantedRestartGeneration(0); } return builder; } }
We don't have any validation in any other builder methods in this class and instead rely on the validation in the main constructor
public Builder cloudAccount(CloudAccount cloudAccount) { this.cloudAccount = cloudAccount; return this; }
this.cloudAccount = cloudAccount;
public Builder cloudAccount(CloudAccount cloudAccount) { this.cloudAccount = cloudAccount; return this; }
class Builder { private String hostname; private String id; private NodeState state; private NodeType type; private CloudAccount cloudAccount = CloudAccount.empty; private String flavor; private Optional<DockerImage> wantedDockerImage = Optional.empty(); private Optional<DockerImage> currentDockerImage = Optional.empty(); private Optional<Version> wantedVespaVersion = Optional.empty(); private Optional<Version> currentVespaVersion = Optional.empty(); private Optional<Version> wantedOsVersion = Optional.empty(); private Optional<Version> currentOsVersion = Optional.empty(); private OrchestratorStatus orchestratorStatus = OrchestratorStatus.NO_REMARKS; private Optional<ApplicationId> owner = Optional.empty(); private Optional<NodeMembership> membership = Optional.empty(); private Optional<Long> wantedRestartGeneration = Optional.empty(); private Optional<Long> currentRestartGeneration = Optional.empty(); private long wantedRebootGeneration; private long currentRebootGeneration; private Optional<Instant> wantedFirmwareCheck = Optional.empty(); private Optional<Instant> currentFirmwareCheck = Optional.empty(); private Optional<String> modelName = Optional.empty(); private NodeResources resources; private NodeResources realResources; private Set<String> ipAddresses = Set.of(); private Set<String> additionalIpAddresses = Set.of(); private NodeReports reports = new NodeReports(); private List<Event> events = List.of(); private Optional<String> parentHostname = Optional.empty(); private Optional<URI> archiveUri = Optional.empty(); private Optional<ApplicationId> exclusiveTo = Optional.empty(); private List<TrustStoreItem> trustStore = List.of(); private Optional<WireguardKey> wireguardPubkey = Optional.empty(); private boolean wantToRebuild = false; public Builder() {} public Builder(NodeSpec node) { hostname(node.hostname); id(node.id); state(node.state); type(node.type); flavor(node.flavor); resources(node.resources); realResources(node.realResources); 
ipAddresses(node.ipAddresses); additionalIpAddresses(node.additionalIpAddresses); wantedRebootGeneration(node.wantedRebootGeneration); currentRebootGeneration(node.currentRebootGeneration); orchestratorStatus(node.orchestratorStatus); reports(new NodeReports(node.reports)); events(node.events); node.wantedDockerImage.ifPresent(this::wantedDockerImage); node.currentDockerImage.ifPresent(this::currentDockerImage); node.wantedVespaVersion.ifPresent(this::wantedVespaVersion); node.currentVespaVersion.ifPresent(this::currentVespaVersion); node.wantedOsVersion.ifPresent(this::wantedOsVersion); node.currentOsVersion.ifPresent(this::currentOsVersion); node.owner.ifPresent(this::owner); node.membership.ifPresent(this::membership); node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration); node.currentRestartGeneration.ifPresent(this::currentRestartGeneration); node.wantedFirmwareCheck.ifPresent(this::wantedFirmwareCheck); node.currentFirmwareCheck.ifPresent(this::currentFirmwareCheck); node.parentHostname.ifPresent(this::parentHostname); node.archiveUri.ifPresent(this::archiveUri); node.exclusiveTo.ifPresent(this::exclusiveTo); trustStore(node.trustStore); node.wireguardPubkey.ifPresent(this::wireguardPubkey); wantToRebuild(node.wantToRebuild); } public Builder hostname(String hostname) { this.hostname = hostname; return this; } public Builder id(String id) { this.id = id; return this; } public Builder wantedDockerImage(DockerImage wantedDockerImage) { this.wantedDockerImage = Optional.of(wantedDockerImage); return this; } public Builder currentDockerImage(DockerImage currentDockerImage) { this.currentDockerImage = Optional.of(currentDockerImage); return this; } public Builder state(NodeState state) { this.state = state; return this; } public Builder type(NodeType nodeType) { this.type = nodeType; return this; } public Builder flavor(String flavor) { this.flavor = flavor; return this; } public Builder wantedVespaVersion(Version wantedVespaVersion) { 
this.wantedVespaVersion = Optional.of(wantedVespaVersion); return this; } public Builder currentVespaVersion(Version vespaVersion) { this.currentVespaVersion = Optional.of(vespaVersion); return this; } public Builder wantedOsVersion(Version wantedOsVersion) { this.wantedOsVersion = Optional.of(wantedOsVersion); return this; } public Builder currentOsVersion(Version currentOsVersion) { this.currentOsVersion = Optional.of(currentOsVersion); return this; } public Builder orchestratorStatus(OrchestratorStatus orchestratorStatus) { this.orchestratorStatus = orchestratorStatus; return this; } public Builder owner(ApplicationId owner) { this.owner = Optional.of(owner); return this; } public Builder membership(NodeMembership membership) { this.membership = Optional.of(membership); return this; } public Builder wantedRestartGeneration(long wantedRestartGeneration) { this.wantedRestartGeneration = Optional.of(wantedRestartGeneration); return this; } public Builder currentRestartGeneration(long currentRestartGeneration) { this.currentRestartGeneration = Optional.of(currentRestartGeneration); return this; } public Builder wantedRebootGeneration(long wantedRebootGeneration) { this.wantedRebootGeneration = wantedRebootGeneration; return this; } public Builder currentRebootGeneration(long currentRebootGeneration) { this.currentRebootGeneration = currentRebootGeneration; return this; } public Builder wantedFirmwareCheck(Instant wantedFirmwareCheck) { this.wantedFirmwareCheck = Optional.of(wantedFirmwareCheck); return this; } public Builder currentFirmwareCheck(Instant currentFirmwareCheck) { this.currentFirmwareCheck = Optional.of(currentFirmwareCheck); return this; } public Builder resources(NodeResources resources) { this.resources = resources; return this; } public Builder realResources(NodeResources realResources) { this.realResources = realResources; return this; } public Builder vcpu(double vcpu) { return realResources(realResources.withVcpu(vcpu)); } public Builder 
memoryGb(double memoryGb) { return realResources(realResources.withMemoryGb(memoryGb)); } public Builder diskGb(double diskGb) { return realResources(realResources.withDiskGb(diskGb)); } public Builder fastDisk(boolean fastDisk) { return realResources(realResources.with(fastDisk ? fast : slow)); } public Builder bandwidthGbps(double bandwidthGbps) { return realResources(realResources.withBandwidthGbps(bandwidthGbps)); } public Builder ipAddresses(Set<String> ipAddresses) { this.ipAddresses = ipAddresses; return this; } public Builder additionalIpAddresses(Set<String> additionalIpAddresses) { this.additionalIpAddresses = additionalIpAddresses; return this; } public Builder reports(NodeReports reports) { this.reports = reports; return this; } public Builder report(String reportId, JsonNode report) { this.reports.setReport(reportId, report); return this; } public Builder removeReport(String reportId) { reports.removeReport(reportId); return this; } public Builder events(List<Event> events) { this.events = events; return this; } public Builder parentHostname(String parentHostname) { this.parentHostname = Optional.of(parentHostname); return this; } public Builder archiveUri(URI archiveUri) { this.archiveUri = Optional.of(archiveUri); return this; } public Builder exclusiveTo(ApplicationId applicationId) { this.exclusiveTo = Optional.of(applicationId); return this; } public Builder trustStore(List<TrustStoreItem> trustStore) { this.trustStore = List.copyOf(trustStore); return this; } public Builder wireguardPubkey(WireguardKey wireguardKey) { wireguardPubkey = Optional.of(wireguardKey); return this; } public Builder wantToRebuild(boolean wantToRebuild) { this.wantToRebuild = wantToRebuild; return this; } public Builder updateFromNodeAttributes(NodeAttributes attributes) { attributes.getHostId().ifPresent(this::id); attributes.getDockerImage().ifPresent(this::currentDockerImage); attributes.getCurrentOsVersion().ifPresent(this::currentOsVersion); 
attributes.getRebootGeneration().ifPresent(this::currentRebootGeneration); attributes.getRestartGeneration().ifPresent(this::currentRestartGeneration); trustStore(attributes.getTrustStore()); attributes.getWireguardPubkey().ifPresent(this::wireguardPubkey); this.reports.updateFromRawMap(attributes.getReports()); return this; } public String hostname() { return hostname; } public Optional<DockerImage> wantedDockerImage() { return wantedDockerImage; } public Optional<DockerImage> currentDockerImage() { return currentDockerImage; } public NodeState state() { return state; } public NodeType type() { return type; } public CloudAccount cloudAccount() { return cloudAccount; } public String flavor() { return flavor; } public Optional<Version> wantedVespaVersion() { return wantedVespaVersion; } public Optional<Version> currentVespaVersion() { return currentVespaVersion; } public Optional<Version> wantedOsVersion() { return wantedOsVersion; } public Optional<Version> currentOsVersion() { return currentOsVersion; } public OrchestratorStatus orchestratorStatus() { return orchestratorStatus; } public Optional<ApplicationId> owner() { return owner; } public Optional<NodeMembership> membership() { return membership; } public Optional<Long> wantedRestartGeneration() { return wantedRestartGeneration; } public Optional<Long> currentRestartGeneration() { return currentRestartGeneration; } public long wantedRebootGeneration() { return wantedRebootGeneration; } public long currentRebootGeneration() { return currentRebootGeneration; } public NodeResources resources() { return resources; } public NodeResources realResources() { return realResources; } public Set<String> ipAddresses() { return ipAddresses; } public Set<String> additionalIpAddresses() { return additionalIpAddresses; } public NodeReports reports() { return reports; } public List<Event> events() { return events; } public Optional<String> parentHostname() { return parentHostname; } public Optional<URI> archiveUri() { return 
archiveUri; } public NodeSpec build() { return new NodeSpec(hostname, id, wantedDockerImage, currentDockerImage, state, type, cloudAccount, flavor, wantedVespaVersion, currentVespaVersion, wantedOsVersion, currentOsVersion, orchestratorStatus, owner, membership, wantedRestartGeneration, currentRestartGeneration, wantedRebootGeneration, currentRebootGeneration, wantedFirmwareCheck, currentFirmwareCheck, modelName, resources, realResources, ipAddresses, additionalIpAddresses, reports, events, parentHostname, archiveUri, exclusiveTo, trustStore, wireguardPubkey, wantToRebuild); } public static Builder testSpec(String hostname) { return testSpec(hostname, NodeState.active); } /** * Creates a NodeSpec.Builder that has the given hostname, in a given state, and some * reasonable values for the remaining required NodeSpec fields. */ public static Builder testSpec(String hostname, NodeState state) { Builder builder = new Builder() .id(hostname) .hostname(hostname) .state(state) .type(NodeType.tenant) .flavor("d-2-8-50") .resources(new NodeResources(2, 8, 50, 10)) .realResources(new NodeResources(2, 8, 50, 10)) .events(List.of(new Event("operator", "rebooted", Instant.EPOCH))); if (EnumSet.of(NodeState.active, NodeState.inactive, NodeState.reserved).contains(state)) { builder .owner(ApplicationId.defaultId()) .membership(new NodeMembership("container", "my-id", "group", 0, false)) .wantedVespaVersion(Version.fromString("7.1.1")) .wantedDockerImage(DockerImage.fromString("docker.domain.tld/repo/image:7.1.1")) .currentRestartGeneration(0) .wantedRestartGeneration(0); } return builder; } }
class Builder { private String hostname; private String id; private NodeState state; private NodeType type; private CloudAccount cloudAccount = CloudAccount.empty; private String flavor; private Optional<DockerImage> wantedDockerImage = Optional.empty(); private Optional<DockerImage> currentDockerImage = Optional.empty(); private Optional<Version> wantedVespaVersion = Optional.empty(); private Optional<Version> currentVespaVersion = Optional.empty(); private Optional<Version> wantedOsVersion = Optional.empty(); private Optional<Version> currentOsVersion = Optional.empty(); private OrchestratorStatus orchestratorStatus = OrchestratorStatus.NO_REMARKS; private Optional<ApplicationId> owner = Optional.empty(); private Optional<NodeMembership> membership = Optional.empty(); private Optional<Long> wantedRestartGeneration = Optional.empty(); private Optional<Long> currentRestartGeneration = Optional.empty(); private long wantedRebootGeneration; private long currentRebootGeneration; private Optional<Instant> wantedFirmwareCheck = Optional.empty(); private Optional<Instant> currentFirmwareCheck = Optional.empty(); private Optional<String> modelName = Optional.empty(); private NodeResources resources; private NodeResources realResources; private Set<String> ipAddresses = Set.of(); private Set<String> additionalIpAddresses = Set.of(); private NodeReports reports = new NodeReports(); private List<Event> events = List.of(); private Optional<String> parentHostname = Optional.empty(); private Optional<URI> archiveUri = Optional.empty(); private Optional<ApplicationId> exclusiveTo = Optional.empty(); private List<TrustStoreItem> trustStore = List.of(); private Optional<WireguardKey> wireguardPubkey = Optional.empty(); private boolean wantToRebuild = false; public Builder() {} public Builder(NodeSpec node) { hostname(node.hostname); id(node.id); state(node.state); type(node.type); flavor(node.flavor); resources(node.resources); realResources(node.realResources); 
ipAddresses(node.ipAddresses); additionalIpAddresses(node.additionalIpAddresses); wantedRebootGeneration(node.wantedRebootGeneration); currentRebootGeneration(node.currentRebootGeneration); orchestratorStatus(node.orchestratorStatus); reports(new NodeReports(node.reports)); events(node.events); node.wantedDockerImage.ifPresent(this::wantedDockerImage); node.currentDockerImage.ifPresent(this::currentDockerImage); node.wantedVespaVersion.ifPresent(this::wantedVespaVersion); node.currentVespaVersion.ifPresent(this::currentVespaVersion); node.wantedOsVersion.ifPresent(this::wantedOsVersion); node.currentOsVersion.ifPresent(this::currentOsVersion); node.owner.ifPresent(this::owner); node.membership.ifPresent(this::membership); node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration); node.currentRestartGeneration.ifPresent(this::currentRestartGeneration); node.wantedFirmwareCheck.ifPresent(this::wantedFirmwareCheck); node.currentFirmwareCheck.ifPresent(this::currentFirmwareCheck); node.parentHostname.ifPresent(this::parentHostname); node.archiveUri.ifPresent(this::archiveUri); node.exclusiveTo.ifPresent(this::exclusiveTo); trustStore(node.trustStore); node.wireguardPubkey.ifPresent(this::wireguardPubkey); wantToRebuild(node.wantToRebuild); } public Builder hostname(String hostname) { this.hostname = hostname; return this; } public Builder id(String id) { this.id = id; return this; } public Builder wantedDockerImage(DockerImage wantedDockerImage) { this.wantedDockerImage = Optional.of(wantedDockerImage); return this; } public Builder currentDockerImage(DockerImage currentDockerImage) { this.currentDockerImage = Optional.of(currentDockerImage); return this; } public Builder state(NodeState state) { this.state = state; return this; } public Builder type(NodeType nodeType) { this.type = nodeType; return this; } public Builder flavor(String flavor) { this.flavor = flavor; return this; } public Builder wantedVespaVersion(Version wantedVespaVersion) { 
this.wantedVespaVersion = Optional.of(wantedVespaVersion); return this; } public Builder currentVespaVersion(Version vespaVersion) { this.currentVespaVersion = Optional.of(vespaVersion); return this; } public Builder wantedOsVersion(Version wantedOsVersion) { this.wantedOsVersion = Optional.of(wantedOsVersion); return this; } public Builder currentOsVersion(Version currentOsVersion) { this.currentOsVersion = Optional.of(currentOsVersion); return this; } public Builder orchestratorStatus(OrchestratorStatus orchestratorStatus) { this.orchestratorStatus = orchestratorStatus; return this; } public Builder owner(ApplicationId owner) { this.owner = Optional.of(owner); return this; } public Builder membership(NodeMembership membership) { this.membership = Optional.of(membership); return this; } public Builder wantedRestartGeneration(long wantedRestartGeneration) { this.wantedRestartGeneration = Optional.of(wantedRestartGeneration); return this; } public Builder currentRestartGeneration(long currentRestartGeneration) { this.currentRestartGeneration = Optional.of(currentRestartGeneration); return this; } public Builder wantedRebootGeneration(long wantedRebootGeneration) { this.wantedRebootGeneration = wantedRebootGeneration; return this; } public Builder currentRebootGeneration(long currentRebootGeneration) { this.currentRebootGeneration = currentRebootGeneration; return this; } public Builder wantedFirmwareCheck(Instant wantedFirmwareCheck) { this.wantedFirmwareCheck = Optional.of(wantedFirmwareCheck); return this; } public Builder currentFirmwareCheck(Instant currentFirmwareCheck) { this.currentFirmwareCheck = Optional.of(currentFirmwareCheck); return this; } public Builder resources(NodeResources resources) { this.resources = resources; return this; } public Builder realResources(NodeResources realResources) { this.realResources = realResources; return this; } public Builder vcpu(double vcpu) { return realResources(realResources.withVcpu(vcpu)); } public Builder 
memoryGb(double memoryGb) { return realResources(realResources.withMemoryGb(memoryGb)); } public Builder diskGb(double diskGb) { return realResources(realResources.withDiskGb(diskGb)); } public Builder fastDisk(boolean fastDisk) { return realResources(realResources.with(fastDisk ? fast : slow)); } public Builder bandwidthGbps(double bandwidthGbps) { return realResources(realResources.withBandwidthGbps(bandwidthGbps)); } public Builder ipAddresses(Set<String> ipAddresses) { this.ipAddresses = ipAddresses; return this; } public Builder additionalIpAddresses(Set<String> additionalIpAddresses) { this.additionalIpAddresses = additionalIpAddresses; return this; } public Builder reports(NodeReports reports) { this.reports = reports; return this; } public Builder report(String reportId, JsonNode report) { this.reports.setReport(reportId, report); return this; } public Builder removeReport(String reportId) { reports.removeReport(reportId); return this; } public Builder events(List<Event> events) { this.events = events; return this; } public Builder parentHostname(String parentHostname) { this.parentHostname = Optional.of(parentHostname); return this; } public Builder archiveUri(URI archiveUri) { this.archiveUri = Optional.of(archiveUri); return this; } public Builder exclusiveTo(ApplicationId applicationId) { this.exclusiveTo = Optional.of(applicationId); return this; } public Builder trustStore(List<TrustStoreItem> trustStore) { this.trustStore = List.copyOf(trustStore); return this; } public Builder wireguardPubkey(WireguardKey wireguardKey) { wireguardPubkey = Optional.of(wireguardKey); return this; } public Builder wantToRebuild(boolean wantToRebuild) { this.wantToRebuild = wantToRebuild; return this; } public Builder updateFromNodeAttributes(NodeAttributes attributes) { attributes.getHostId().ifPresent(this::id); attributes.getDockerImage().ifPresent(this::currentDockerImage); attributes.getCurrentOsVersion().ifPresent(this::currentOsVersion); 
attributes.getRebootGeneration().ifPresent(this::currentRebootGeneration); attributes.getRestartGeneration().ifPresent(this::currentRestartGeneration); trustStore(attributes.getTrustStore()); attributes.getWireguardPubkey().ifPresent(this::wireguardPubkey); this.reports.updateFromRawMap(attributes.getReports()); return this; } public String hostname() { return hostname; } public Optional<DockerImage> wantedDockerImage() { return wantedDockerImage; } public Optional<DockerImage> currentDockerImage() { return currentDockerImage; } public NodeState state() { return state; } public NodeType type() { return type; } public CloudAccount cloudAccount() { return cloudAccount; } public String flavor() { return flavor; } public Optional<Version> wantedVespaVersion() { return wantedVespaVersion; } public Optional<Version> currentVespaVersion() { return currentVespaVersion; } public Optional<Version> wantedOsVersion() { return wantedOsVersion; } public Optional<Version> currentOsVersion() { return currentOsVersion; } public OrchestratorStatus orchestratorStatus() { return orchestratorStatus; } public Optional<ApplicationId> owner() { return owner; } public Optional<NodeMembership> membership() { return membership; } public Optional<Long> wantedRestartGeneration() { return wantedRestartGeneration; } public Optional<Long> currentRestartGeneration() { return currentRestartGeneration; } public long wantedRebootGeneration() { return wantedRebootGeneration; } public long currentRebootGeneration() { return currentRebootGeneration; } public NodeResources resources() { return resources; } public NodeResources realResources() { return realResources; } public Set<String> ipAddresses() { return ipAddresses; } public Set<String> additionalIpAddresses() { return additionalIpAddresses; } public NodeReports reports() { return reports; } public List<Event> events() { return events; } public Optional<String> parentHostname() { return parentHostname; } public Optional<URI> archiveUri() { return 
archiveUri; } public NodeSpec build() { return new NodeSpec(hostname, id, wantedDockerImage, currentDockerImage, state, type, cloudAccount, flavor, wantedVespaVersion, currentVespaVersion, wantedOsVersion, currentOsVersion, orchestratorStatus, owner, membership, wantedRestartGeneration, currentRestartGeneration, wantedRebootGeneration, currentRebootGeneration, wantedFirmwareCheck, currentFirmwareCheck, modelName, resources, realResources, ipAddresses, additionalIpAddresses, reports, events, parentHostname, archiveUri, exclusiveTo, trustStore, wireguardPubkey, wantToRebuild); } public static Builder testSpec(String hostname) { return testSpec(hostname, NodeState.active); } /** * Creates a NodeSpec.Builder that has the given hostname, in a given state, and some * reasonable values for the remaining required NodeSpec fields. */ public static Builder testSpec(String hostname, NodeState state) { Builder builder = new Builder() .id(hostname) .hostname(hostname) .state(state) .type(NodeType.tenant) .flavor("d-2-8-50") .resources(new NodeResources(2, 8, 50, 10)) .realResources(new NodeResources(2, 8, 50, 10)) .events(List.of(new Event("operator", "rebooted", Instant.EPOCH))); if (EnumSet.of(NodeState.active, NodeState.inactive, NodeState.reserved).contains(state)) { builder .owner(ApplicationId.defaultId()) .membership(new NodeMembership("container", "my-id", "group", 0, false)) .wantedVespaVersion(Version.fromString("7.1.1")) .wantedDockerImage(DockerImage.fromString("docker.domain.tld/repo/image:7.1.1")) .currentRestartGeneration(0) .wantedRestartGeneration(0); } return builder; } }
super() should be first in method
protected void doPrepare(DeployState deployState) { addAndSendApplicationBundles(deployState); sendUserConfiguredFiles(deployState); createEndpointList(deployState); super.doPrepare(deployState); }
super.doPrepare(deployState);
protected void doPrepare(DeployState deployState) { super.doPrepare(deployState); addAndSendApplicationBundles(deployState); sendUserConfiguredFiles(deployState); createEndpointList(deployState); }
class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements ApplicationBundlesConfig.Producer, QrStartConfig.Producer, RankProfilesConfig.Producer, RankingConstantsConfig.Producer, OnnxModelsConfig.Producer, RankingExpressionsConfig.Producer, ContainerMbusConfig.Producer, MetricsProxyApiConfig.Producer, ZookeeperServerConfig.Producer, ApplicationClusterInfo { public static final String METRICS_V2_HANDLER_CLASS = MetricsV2Handler.class.getName(); public static final BindingPattern METRICS_V2_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH); public static final BindingPattern METRICS_V2_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH + "/*"); public static final String PROMETHEUS_V1_HANDLER_CLASS = PrometheusV1Handler.class.getName(); private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH); private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH + "/*"); public static final int defaultHeapSizePercentageOfTotalNodeMemory = 70; public static final int heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster = 18; private final Set<FileReference> applicationBundles = new LinkedHashSet<>(); private final Set<String> previousHosts; private ContainerModelEvaluation modelEvaluation; private final Optional<String> tlsClientAuthority; private MbusParams mbusParams; private boolean messageBusEnabled = true; private int zookeeperSessionTimeoutSeconds = 30; private final int transport_events_before_wakeup; private final int transport_connections_per_target; private final int heapSizePercentageOfTotalNodeMemory; private Integer memoryPercentage = null; private List<ApplicationClusterEndpoint> endpointList = List.of(); public ApplicationContainerCluster(TreeConfigProducer<?> parent, String configSubId, String clusterId, DeployState 
deployState) { super(parent, configSubId, clusterId, deployState, true, 10); this.tlsClientAuthority = deployState.tlsClientAuthority(); previousHosts = Collections.unmodifiableSet(deployState.getPreviousModel().stream() .map(Model::allocatedHosts) .map(AllocatedHosts::getHosts) .flatMap(Collection::stream) .map(HostSpec::hostname) .collect(Collectors.toCollection(() -> new LinkedHashSet<>()))); addSimpleComponent("com.yahoo.language.provider.DefaultLinguisticsProvider"); addSimpleComponent("com.yahoo.language.provider.DefaultEmbedderProvider"); addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider"); addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider"); addSimpleComponent("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"); addSimpleComponent("com.yahoo.container.core.documentapi.DocumentAccessProvider"); addSimpleComponent(DOCUMENT_TYPE_MANAGER_CLASS); addMetricsHandlers(); addTestrunnerComponentsIfTester(deployState); transport_connections_per_target = deployState.featureFlags().mbusJavaRpcNumTargets(); transport_events_before_wakeup = deployState.featureFlags().mbusJavaEventsBeforeWakeup(); heapSizePercentageOfTotalNodeMemory = deployState.featureFlags().heapSizePercentage() > 0 ? 
Math.min(99, deployState.featureFlags().heapSizePercentage()) : defaultHeapSizePercentageOfTotalNodeMemory; } @Override private void addAndSendApplicationBundles(DeployState deployState) { for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) { FileReference reference = deployState.getFileRegistry().addFile(component.getPathRelativeToAppDir()); applicationBundles.add(reference); } } private void sendUserConfiguredFiles(DeployState deployState) { FileSender fileSender = new FileSender(containers, deployState.getFileRegistry(), deployState.getDeployLogger()); for (Component<?, ?> component : getAllComponents()) { fileSender.sendUserConfiguredFiles(component); } } private void addMetricsHandlers() { addMetricsHandler(METRICS_V2_HANDLER_CLASS, METRICS_V2_HANDLER_BINDING_1, METRICS_V2_HANDLER_BINDING_2); addMetricsHandler(PROMETHEUS_V1_HANDLER_CLASS, PROMETHEUS_V1_HANDLER_BINDING_1, PROMETHEUS_V1_HANDLER_BINDING_2); } private void addMetricsHandler(String handlerClass, BindingPattern rootBinding, BindingPattern innerBinding) { Handler handler = new Handler( new ComponentModel(handlerClass, null, null, null)); handler.addServerBindings(rootBinding, innerBinding); addComponent(handler); } private void addTestrunnerComponentsIfTester(DeployState deployState) { if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester()) { addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-testrunner-components")); addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-osgi-testrunner")); addPlatformBundle(PlatformBundles.absoluteBundlePath("tenant-cd-api")); if(deployState.zone().system().isPublic()) { addPlatformBundle(PlatformBundles.absoluteBundlePath("cloud-tenant-cd")); } } } public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) { this.modelEvaluation = modelEvaluation; } public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = 
memoryPercentage; } @Override public Optional<Integer> getMemoryPercentage() { if (memoryPercentage != null) { return Optional.of(memoryPercentage); } else if (isHostedVespa()) { return getHostClusterId().isPresent() ? Optional.of(heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster) : Optional.of(heapSizePercentageOfTotalNodeMemory); } return Optional.empty(); } /** Create list of endpoints, these will be consumed later by LbServicesProducer */ private void createEndpointList(DeployState deployState) { if(!deployState.isHosted()) return; if(deployState.getProperties().applicationId().instance().isTester()) return; List<ApplicationClusterEndpoint> endpoints = new ArrayList<>(); List<String> hosts = getContainers().stream() .map(AbstractService::getHostName) .sorted() .toList(); for (String suffix : deployState.getProperties().zoneDnsSuffixes()) { ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom( deployState.zone().system(), ClusterSpec.Id.from(getName()), deployState.getProperties().applicationId(), suffix); endpoints.add(ApplicationClusterEndpoint.builder() .zoneScope() .sharedL4Routing() .dnsName(l4Name) .hosts(hosts) .clusterId(getName()) .build()); } Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints(); endpointsFromController.stream() .filter(ce -> ce.clusterId().equals(getName())) .filter(ce -> ce.routingMethod() == sharedLayer4) .forEach(ce -> ce.names().forEach( name -> endpoints.add(ApplicationClusterEndpoint.builder() .scope(ce.scope()) .weight(Long.valueOf(ce.weight().orElse(1)).intValue()) .routingMethod(ce.routingMethod()) .dnsName(ApplicationClusterEndpoint.DnsName.from(name)) .hosts(hosts) .clusterId(getName()) .build()) )); endpointList = List.copyOf(endpoints); } @Override public void getConfig(ApplicationBundlesConfig.Builder builder) { applicationBundles.stream().map(FileReference::value) .forEach(builder::bundles); } @Override public void getConfig(RankProfilesConfig.Builder 
builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } @Override public void getConfig(RankingConstantsConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } @Override public void getConfig(OnnxModelsConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } public void getConfig(RankingExpressionsConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } @Override public void getConfig(ContainerMbusConfig.Builder builder) { if (mbusParams != null) { if (mbusParams.maxConcurrentFactor != null) builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor); if (mbusParams.documentExpansionFactor != null) builder.documentExpansionFactor(mbusParams.documentExpansionFactor); if (mbusParams.containerCoreMemory != null) builder.containerCoreMemory(mbusParams.containerCoreMemory); } if (getDocproc() != null) getDocproc().getConfig(builder); builder.transport_events_before_wakeup(transport_events_before_wakeup); builder.numconnectionspertarget(transport_connections_per_target); } @Override public void getConfig(MetricsProxyApiConfig.Builder builder) { builder.metricsPort(MetricsProxyContainer.BASEPORT) .metricsApiPath(ApplicationMetricsHandler.METRICS_VALUES_PATH) .prometheusApiPath(ApplicationMetricsHandler.PROMETHEUS_VALUES_PATH); } @Override public void getConfig(QrStartConfig.Builder builder) { super.getConfig(builder); builder.jvm.verbosegc(true) .availableProcessors(0) .compressedClassSpaceSize(0) .minHeapsize(1536) .heapsize(1536); if (getMemoryPercentage().isPresent()) { builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get()); } } @Override public void getConfig(ZookeeperServerConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (Container container : getContainers()) { ZookeeperServerConfig.Server.Builder serverBuilder = new ZookeeperServerConfig.Server.Builder(); 
serverBuilder.hostname(container.getHostName()) .id(container.index()) .joining( ! previousHosts.isEmpty() && ! previousHosts.contains(container.getHostName())) .retired(container.isRetired()); builder.server(serverBuilder); } builder.dynamicReconfiguration(true); } @Override public void getConfig(CuratorConfig.Builder builder) { super.getConfig(builder); if (getParent() instanceof ConfigserverCluster) return; builder.zookeeperSessionTimeoutSeconds(zookeeperSessionTimeoutSeconds); } public Optional<String> getTlsClientAuthority() { return tlsClientAuthority; } public void setMbusParams(MbusParams mbusParams) { this.mbusParams = mbusParams; } public void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; } public void setZookeeperSessionTimeoutSeconds(int timeoutSeconds) { this.zookeeperSessionTimeoutSeconds = timeoutSeconds; } protected boolean messageBusEnabled() { return messageBusEnabled; } public void addMbusServer(ComponentId chainId) { ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer")); addComponent( new Component<>(new ComponentModel(new BundleInstantiationSpecification( serviceId, ComponentSpecification.fromString(MbusServerProvider.class.getName()), null)))); } @Override public List<ApplicationClusterEndpoint> endpoints() { return endpointList; } @Override public String name() { return getName(); } public static class MbusParams { final Double maxConcurrentFactor; final Double documentExpansionFactor; final Integer containerCoreMemory; public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) { this.maxConcurrentFactor = maxConcurrentFactor; this.documentExpansionFactor = documentExpansionFactor; this.containerCoreMemory = containerCoreMemory; } } }
class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements ApplicationBundlesConfig.Producer, QrStartConfig.Producer, RankProfilesConfig.Producer, RankingConstantsConfig.Producer, OnnxModelsConfig.Producer, RankingExpressionsConfig.Producer, ContainerMbusConfig.Producer, MetricsProxyApiConfig.Producer, ZookeeperServerConfig.Producer, ApplicationClusterInfo { public static final String METRICS_V2_HANDLER_CLASS = MetricsV2Handler.class.getName(); public static final BindingPattern METRICS_V2_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH); public static final BindingPattern METRICS_V2_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH + "/*"); public static final String PROMETHEUS_V1_HANDLER_CLASS = PrometheusV1Handler.class.getName(); private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH); private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH + "/*"); public static final int defaultHeapSizePercentageOfTotalNodeMemory = 70; public static final int heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster = 18; private final Set<FileReference> applicationBundles = new LinkedHashSet<>(); private final Set<String> previousHosts; private ContainerModelEvaluation modelEvaluation; private final Optional<String> tlsClientAuthority; private MbusParams mbusParams; private boolean messageBusEnabled = true; private int zookeeperSessionTimeoutSeconds = 30; private final int transport_events_before_wakeup; private final int transport_connections_per_target; private final int heapSizePercentageOfTotalNodeMemory; private Integer memoryPercentage = null; private List<ApplicationClusterEndpoint> endpointList = List.of(); public ApplicationContainerCluster(TreeConfigProducer<?> parent, String configSubId, String clusterId, DeployState 
deployState) { super(parent, configSubId, clusterId, deployState, true, 10); this.tlsClientAuthority = deployState.tlsClientAuthority(); previousHosts = Collections.unmodifiableSet(deployState.getPreviousModel().stream() .map(Model::allocatedHosts) .map(AllocatedHosts::getHosts) .flatMap(Collection::stream) .map(HostSpec::hostname) .collect(Collectors.toCollection(() -> new LinkedHashSet<>()))); addSimpleComponent("com.yahoo.language.provider.DefaultLinguisticsProvider"); addSimpleComponent("com.yahoo.language.provider.DefaultEmbedderProvider"); addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider"); addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider"); addSimpleComponent("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"); addSimpleComponent("com.yahoo.container.core.documentapi.DocumentAccessProvider"); addSimpleComponent(DOCUMENT_TYPE_MANAGER_CLASS); addMetricsHandlers(); addTestrunnerComponentsIfTester(deployState); transport_connections_per_target = deployState.featureFlags().mbusJavaRpcNumTargets(); transport_events_before_wakeup = deployState.featureFlags().mbusJavaEventsBeforeWakeup(); heapSizePercentageOfTotalNodeMemory = deployState.featureFlags().heapSizePercentage() > 0 ? 
Math.min(99, deployState.featureFlags().heapSizePercentage()) : defaultHeapSizePercentageOfTotalNodeMemory; } @Override private void addAndSendApplicationBundles(DeployState deployState) { for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) { FileReference reference = deployState.getFileRegistry().addFile(component.getPathRelativeToAppDir()); applicationBundles.add(reference); } } private void sendUserConfiguredFiles(DeployState deployState) { FileSender fileSender = new FileSender(containers, deployState.getFileRegistry(), deployState.getDeployLogger()); for (Component<?, ?> component : getAllComponents()) { fileSender.sendUserConfiguredFiles(component); } } private void addMetricsHandlers() { addMetricsHandler(METRICS_V2_HANDLER_CLASS, METRICS_V2_HANDLER_BINDING_1, METRICS_V2_HANDLER_BINDING_2); addMetricsHandler(PROMETHEUS_V1_HANDLER_CLASS, PROMETHEUS_V1_HANDLER_BINDING_1, PROMETHEUS_V1_HANDLER_BINDING_2); } private void addMetricsHandler(String handlerClass, BindingPattern rootBinding, BindingPattern innerBinding) { Handler handler = new Handler( new ComponentModel(handlerClass, null, null, null)); handler.addServerBindings(rootBinding, innerBinding); addComponent(handler); } private void addTestrunnerComponentsIfTester(DeployState deployState) { if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester()) { addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-testrunner-components")); addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-osgi-testrunner")); addPlatformBundle(PlatformBundles.absoluteBundlePath("tenant-cd-api")); if(deployState.zone().system().isPublic()) { addPlatformBundle(PlatformBundles.absoluteBundlePath("cloud-tenant-cd")); } } } public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) { this.modelEvaluation = modelEvaluation; } public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = 
memoryPercentage; } @Override public Optional<Integer> getMemoryPercentage() { if (memoryPercentage != null) { return Optional.of(memoryPercentage); } else if (isHostedVespa()) { return getHostClusterId().isPresent() ? Optional.of(heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster) : Optional.of(heapSizePercentageOfTotalNodeMemory); } return Optional.empty(); } /** Create list of endpoints, these will be consumed later by LbServicesProducer */ private void createEndpointList(DeployState deployState) { if(!deployState.isHosted()) return; if(deployState.getProperties().applicationId().instance().isTester()) return; List<ApplicationClusterEndpoint> endpoints = new ArrayList<>(); List<String> hosts = getContainers().stream() .map(AbstractService::getHostName) .sorted() .toList(); for (String suffix : deployState.getProperties().zoneDnsSuffixes()) { ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom( deployState.zone().system(), ClusterSpec.Id.from(getName()), deployState.getProperties().applicationId(), suffix); endpoints.add(ApplicationClusterEndpoint.builder() .zoneScope() .sharedL4Routing() .dnsName(l4Name) .hosts(hosts) .clusterId(getName()) .build()); } Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints(); endpointsFromController.stream() .filter(ce -> ce.clusterId().equals(getName())) .filter(ce -> ce.routingMethod() == sharedLayer4) .forEach(ce -> ce.names().forEach( name -> endpoints.add(ApplicationClusterEndpoint.builder() .scope(ce.scope()) .weight(Long.valueOf(ce.weight().orElse(1)).intValue()) .routingMethod(ce.routingMethod()) .dnsName(ApplicationClusterEndpoint.DnsName.from(name)) .hosts(hosts) .clusterId(getName()) .build()) )); endpointList = List.copyOf(endpoints); } @Override public void getConfig(ApplicationBundlesConfig.Builder builder) { applicationBundles.stream().map(FileReference::value) .forEach(builder::bundles); } @Override public void getConfig(RankProfilesConfig.Builder 
builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } @Override public void getConfig(RankingConstantsConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } @Override public void getConfig(OnnxModelsConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } public void getConfig(RankingExpressionsConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } @Override public void getConfig(ContainerMbusConfig.Builder builder) { if (mbusParams != null) { if (mbusParams.maxConcurrentFactor != null) builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor); if (mbusParams.documentExpansionFactor != null) builder.documentExpansionFactor(mbusParams.documentExpansionFactor); if (mbusParams.containerCoreMemory != null) builder.containerCoreMemory(mbusParams.containerCoreMemory); } if (getDocproc() != null) getDocproc().getConfig(builder); builder.transport_events_before_wakeup(transport_events_before_wakeup); builder.numconnectionspertarget(transport_connections_per_target); } @Override public void getConfig(MetricsProxyApiConfig.Builder builder) { builder.metricsPort(MetricsProxyContainer.BASEPORT) .metricsApiPath(ApplicationMetricsHandler.METRICS_VALUES_PATH) .prometheusApiPath(ApplicationMetricsHandler.PROMETHEUS_VALUES_PATH); } @Override public void getConfig(QrStartConfig.Builder builder) { super.getConfig(builder); builder.jvm.verbosegc(true) .availableProcessors(0) .compressedClassSpaceSize(0) .minHeapsize(1536) .heapsize(1536); if (getMemoryPercentage().isPresent()) { builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get()); } } @Override public void getConfig(ZookeeperServerConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (Container container : getContainers()) { ZookeeperServerConfig.Server.Builder serverBuilder = new ZookeeperServerConfig.Server.Builder(); 
serverBuilder.hostname(container.getHostName()) .id(container.index()) .joining( ! previousHosts.isEmpty() && ! previousHosts.contains(container.getHostName())) .retired(container.isRetired()); builder.server(serverBuilder); } builder.dynamicReconfiguration(true); } @Override public void getConfig(CuratorConfig.Builder builder) { super.getConfig(builder); if (getParent() instanceof ConfigserverCluster) return; builder.zookeeperSessionTimeoutSeconds(zookeeperSessionTimeoutSeconds); } public Optional<String> getTlsClientAuthority() { return tlsClientAuthority; } public void setMbusParams(MbusParams mbusParams) { this.mbusParams = mbusParams; } public void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; } public void setZookeeperSessionTimeoutSeconds(int timeoutSeconds) { this.zookeeperSessionTimeoutSeconds = timeoutSeconds; } protected boolean messageBusEnabled() { return messageBusEnabled; } public void addMbusServer(ComponentId chainId) { ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer")); addComponent( new Component<>(new ComponentModel(new BundleInstantiationSpecification( serviceId, ComponentSpecification.fromString(MbusServerProvider.class.getName()), null)))); } @Override public List<ApplicationClusterEndpoint> endpoints() { return endpointList; } @Override public String name() { return getName(); } public static class MbusParams { final Double maxConcurrentFactor; final Double documentExpansionFactor; final Integer containerCoreMemory; public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) { this.maxConcurrentFactor = maxConcurrentFactor; this.documentExpansionFactor = documentExpansionFactor; this.containerCoreMemory = containerCoreMemory; } } }
Fixed
protected void doPrepare(DeployState deployState) { addAndSendApplicationBundles(deployState); sendUserConfiguredFiles(deployState); createEndpointList(deployState); super.doPrepare(deployState); }
super.doPrepare(deployState);
protected void doPrepare(DeployState deployState) { super.doPrepare(deployState); addAndSendApplicationBundles(deployState); sendUserConfiguredFiles(deployState); createEndpointList(deployState); }
class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements ApplicationBundlesConfig.Producer, QrStartConfig.Producer, RankProfilesConfig.Producer, RankingConstantsConfig.Producer, OnnxModelsConfig.Producer, RankingExpressionsConfig.Producer, ContainerMbusConfig.Producer, MetricsProxyApiConfig.Producer, ZookeeperServerConfig.Producer, ApplicationClusterInfo { public static final String METRICS_V2_HANDLER_CLASS = MetricsV2Handler.class.getName(); public static final BindingPattern METRICS_V2_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH); public static final BindingPattern METRICS_V2_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH + "/*"); public static final String PROMETHEUS_V1_HANDLER_CLASS = PrometheusV1Handler.class.getName(); private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH); private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH + "/*"); public static final int defaultHeapSizePercentageOfTotalNodeMemory = 70; public static final int heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster = 18; private final Set<FileReference> applicationBundles = new LinkedHashSet<>(); private final Set<String> previousHosts; private ContainerModelEvaluation modelEvaluation; private final Optional<String> tlsClientAuthority; private MbusParams mbusParams; private boolean messageBusEnabled = true; private int zookeeperSessionTimeoutSeconds = 30; private final int transport_events_before_wakeup; private final int transport_connections_per_target; private final int heapSizePercentageOfTotalNodeMemory; private Integer memoryPercentage = null; private List<ApplicationClusterEndpoint> endpointList = List.of(); public ApplicationContainerCluster(TreeConfigProducer<?> parent, String configSubId, String clusterId, DeployState 
deployState) { super(parent, configSubId, clusterId, deployState, true, 10); this.tlsClientAuthority = deployState.tlsClientAuthority(); previousHosts = Collections.unmodifiableSet(deployState.getPreviousModel().stream() .map(Model::allocatedHosts) .map(AllocatedHosts::getHosts) .flatMap(Collection::stream) .map(HostSpec::hostname) .collect(Collectors.toCollection(() -> new LinkedHashSet<>()))); addSimpleComponent("com.yahoo.language.provider.DefaultLinguisticsProvider"); addSimpleComponent("com.yahoo.language.provider.DefaultEmbedderProvider"); addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider"); addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider"); addSimpleComponent("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"); addSimpleComponent("com.yahoo.container.core.documentapi.DocumentAccessProvider"); addSimpleComponent(DOCUMENT_TYPE_MANAGER_CLASS); addMetricsHandlers(); addTestrunnerComponentsIfTester(deployState); transport_connections_per_target = deployState.featureFlags().mbusJavaRpcNumTargets(); transport_events_before_wakeup = deployState.featureFlags().mbusJavaEventsBeforeWakeup(); heapSizePercentageOfTotalNodeMemory = deployState.featureFlags().heapSizePercentage() > 0 ? 
Math.min(99, deployState.featureFlags().heapSizePercentage()) : defaultHeapSizePercentageOfTotalNodeMemory; } @Override private void addAndSendApplicationBundles(DeployState deployState) { for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) { FileReference reference = deployState.getFileRegistry().addFile(component.getPathRelativeToAppDir()); applicationBundles.add(reference); } } private void sendUserConfiguredFiles(DeployState deployState) { FileSender fileSender = new FileSender(containers, deployState.getFileRegistry(), deployState.getDeployLogger()); for (Component<?, ?> component : getAllComponents()) { fileSender.sendUserConfiguredFiles(component); } } private void addMetricsHandlers() { addMetricsHandler(METRICS_V2_HANDLER_CLASS, METRICS_V2_HANDLER_BINDING_1, METRICS_V2_HANDLER_BINDING_2); addMetricsHandler(PROMETHEUS_V1_HANDLER_CLASS, PROMETHEUS_V1_HANDLER_BINDING_1, PROMETHEUS_V1_HANDLER_BINDING_2); } private void addMetricsHandler(String handlerClass, BindingPattern rootBinding, BindingPattern innerBinding) { Handler handler = new Handler( new ComponentModel(handlerClass, null, null, null)); handler.addServerBindings(rootBinding, innerBinding); addComponent(handler); } private void addTestrunnerComponentsIfTester(DeployState deployState) { if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester()) { addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-testrunner-components")); addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-osgi-testrunner")); addPlatformBundle(PlatformBundles.absoluteBundlePath("tenant-cd-api")); if(deployState.zone().system().isPublic()) { addPlatformBundle(PlatformBundles.absoluteBundlePath("cloud-tenant-cd")); } } } public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) { this.modelEvaluation = modelEvaluation; } public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = 
memoryPercentage; } @Override public Optional<Integer> getMemoryPercentage() { if (memoryPercentage != null) { return Optional.of(memoryPercentage); } else if (isHostedVespa()) { return getHostClusterId().isPresent() ? Optional.of(heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster) : Optional.of(heapSizePercentageOfTotalNodeMemory); } return Optional.empty(); } /** Create list of endpoints, these will be consumed later by LbServicesProducer */ private void createEndpointList(DeployState deployState) { if(!deployState.isHosted()) return; if(deployState.getProperties().applicationId().instance().isTester()) return; List<ApplicationClusterEndpoint> endpoints = new ArrayList<>(); List<String> hosts = getContainers().stream() .map(AbstractService::getHostName) .sorted() .toList(); for (String suffix : deployState.getProperties().zoneDnsSuffixes()) { ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom( deployState.zone().system(), ClusterSpec.Id.from(getName()), deployState.getProperties().applicationId(), suffix); endpoints.add(ApplicationClusterEndpoint.builder() .zoneScope() .sharedL4Routing() .dnsName(l4Name) .hosts(hosts) .clusterId(getName()) .build()); } Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints(); endpointsFromController.stream() .filter(ce -> ce.clusterId().equals(getName())) .filter(ce -> ce.routingMethod() == sharedLayer4) .forEach(ce -> ce.names().forEach( name -> endpoints.add(ApplicationClusterEndpoint.builder() .scope(ce.scope()) .weight(Long.valueOf(ce.weight().orElse(1)).intValue()) .routingMethod(ce.routingMethod()) .dnsName(ApplicationClusterEndpoint.DnsName.from(name)) .hosts(hosts) .clusterId(getName()) .build()) )); endpointList = List.copyOf(endpoints); } @Override public void getConfig(ApplicationBundlesConfig.Builder builder) { applicationBundles.stream().map(FileReference::value) .forEach(builder::bundles); } @Override public void getConfig(RankProfilesConfig.Builder 
builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } @Override public void getConfig(RankingConstantsConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } @Override public void getConfig(OnnxModelsConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } public void getConfig(RankingExpressionsConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } @Override public void getConfig(ContainerMbusConfig.Builder builder) { if (mbusParams != null) { if (mbusParams.maxConcurrentFactor != null) builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor); if (mbusParams.documentExpansionFactor != null) builder.documentExpansionFactor(mbusParams.documentExpansionFactor); if (mbusParams.containerCoreMemory != null) builder.containerCoreMemory(mbusParams.containerCoreMemory); } if (getDocproc() != null) getDocproc().getConfig(builder); builder.transport_events_before_wakeup(transport_events_before_wakeup); builder.numconnectionspertarget(transport_connections_per_target); } @Override public void getConfig(MetricsProxyApiConfig.Builder builder) { builder.metricsPort(MetricsProxyContainer.BASEPORT) .metricsApiPath(ApplicationMetricsHandler.METRICS_VALUES_PATH) .prometheusApiPath(ApplicationMetricsHandler.PROMETHEUS_VALUES_PATH); } @Override public void getConfig(QrStartConfig.Builder builder) { super.getConfig(builder); builder.jvm.verbosegc(true) .availableProcessors(0) .compressedClassSpaceSize(0) .minHeapsize(1536) .heapsize(1536); if (getMemoryPercentage().isPresent()) { builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get()); } } @Override public void getConfig(ZookeeperServerConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (Container container : getContainers()) { ZookeeperServerConfig.Server.Builder serverBuilder = new ZookeeperServerConfig.Server.Builder(); 
serverBuilder.hostname(container.getHostName()) .id(container.index()) .joining( ! previousHosts.isEmpty() && ! previousHosts.contains(container.getHostName())) .retired(container.isRetired()); builder.server(serverBuilder); } builder.dynamicReconfiguration(true); } @Override public void getConfig(CuratorConfig.Builder builder) { super.getConfig(builder); if (getParent() instanceof ConfigserverCluster) return; builder.zookeeperSessionTimeoutSeconds(zookeeperSessionTimeoutSeconds); } public Optional<String> getTlsClientAuthority() { return tlsClientAuthority; } public void setMbusParams(MbusParams mbusParams) { this.mbusParams = mbusParams; } public void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; } public void setZookeeperSessionTimeoutSeconds(int timeoutSeconds) { this.zookeeperSessionTimeoutSeconds = timeoutSeconds; } protected boolean messageBusEnabled() { return messageBusEnabled; } public void addMbusServer(ComponentId chainId) { ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer")); addComponent( new Component<>(new ComponentModel(new BundleInstantiationSpecification( serviceId, ComponentSpecification.fromString(MbusServerProvider.class.getName()), null)))); } @Override public List<ApplicationClusterEndpoint> endpoints() { return endpointList; } @Override public String name() { return getName(); } public static class MbusParams { final Double maxConcurrentFactor; final Double documentExpansionFactor; final Integer containerCoreMemory; public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) { this.maxConcurrentFactor = maxConcurrentFactor; this.documentExpansionFactor = documentExpansionFactor; this.containerCoreMemory = containerCoreMemory; } } }
/**
 * The container cluster of a deployed application (as opposed to infrastructure clusters).
 * Registers the standard components of such a cluster and produces the configs its containers
 * need: application bundles, ranking models, message bus, metrics proxy, JVM start settings
 * and the embedded ZooKeeper/Curator setup. Also tracks the cluster's endpoints.
 */
class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements
        ApplicationBundlesConfig.Producer,
        QrStartConfig.Producer,
        RankProfilesConfig.Producer,
        RankingConstantsConfig.Producer,
        OnnxModelsConfig.Producer,
        RankingExpressionsConfig.Producer,
        ContainerMbusConfig.Producer,
        MetricsProxyApiConfig.Producer,
        ZookeeperServerConfig.Producer,
        ApplicationClusterInfo {

    // Handler classes and the HTTP bindings (root path and wildcard sub-paths) they serve.
    public static final String METRICS_V2_HANDLER_CLASS = MetricsV2Handler.class.getName();
    public static final BindingPattern METRICS_V2_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH);
    public static final BindingPattern METRICS_V2_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(MetricsV2Handler.V2_PATH + "/*");

    public static final String PROMETHEUS_V1_HANDLER_CLASS = PrometheusV1Handler.class.getName();
    private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH);
    private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH + "/*");

    // JVM heap as a percentage of available node memory: the default, and the lower value used
    // when this cluster shares its nodes with another cluster (see getMemoryPercentage()).
    public static final int defaultHeapSizePercentageOfTotalNodeMemory = 70;
    public static final int heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster = 18;

    // File references of the application's own bundles, filled by addAndSendApplicationBundles().
    private final Set<FileReference> applicationBundles = new LinkedHashSet<>();

    // Hostnames present in the previous model generation; a ZooKeeper server on a host NOT in this
    // set is marked as "joining" in getConfig(ZookeeperServerConfig.Builder).
    private final Set<String> previousHosts;

    private ContainerModelEvaluation modelEvaluation;

    private final Optional<String> tlsClientAuthority;

    // Message bus tuning; null fields mean "use the config default" (see getConfig(ContainerMbusConfig.Builder)).
    private MbusParams mbusParams;
    private boolean messageBusEnabled = true;

    private int zookeeperSessionTimeoutSeconds = 30;
    private final int transport_events_before_wakeup;
    private final int transport_connections_per_target;

    // Heap percentage chosen from the feature flag at construction time (capped at 99).
    private final int heapSizePercentageOfTotalNodeMemory;

    // Explicitly configured heap percentage; overrides the derived values when non-null.
    private Integer memoryPercentage = null;

    private List<ApplicationClusterEndpoint> endpointList = List.of();

    /**
     * Creates the cluster, registers its default components and handlers, and resolves
     * transport and heap tuning from the deploy state's feature flags.
     */
    public ApplicationContainerCluster(TreeConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState) {
        super(parent, configSubId, clusterId, deployState, true, 10);
        this.tlsClientAuthority = deployState.tlsClientAuthority();
        // Collect all hostnames of the previous model generation (if any) to detect joining nodes.
        previousHosts = Collections.unmodifiableSet(deployState.getPreviousModel().stream()
                                                               .map(Model::allocatedHosts)
                                                               .map(AllocatedHosts::getHosts)
                                                               .flatMap(Collection::stream)
                                                               .map(HostSpec::hostname)
                                                               .collect(Collectors.toCollection(() -> new LinkedHashSet<>())));

        addSimpleComponent("com.yahoo.language.provider.DefaultLinguisticsProvider");
        addSimpleComponent("com.yahoo.language.provider.DefaultEmbedderProvider");
        addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
        addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
        addSimpleComponent("com.yahoo.container.jdisc.AthenzIdentityProviderProvider");
        addSimpleComponent("com.yahoo.container.core.documentapi.DocumentAccessProvider");
        addSimpleComponent(DOCUMENT_TYPE_MANAGER_CLASS);

        addMetricsHandlers();
        addTestrunnerComponentsIfTester(deployState);
        transport_connections_per_target = deployState.featureFlags().mbusJavaRpcNumTargets();
        transport_events_before_wakeup = deployState.featureFlags().mbusJavaEventsBeforeWakeup();
        // A positive flag value overrides the default heap percentage, but is capped at 99.
        heapSizePercentageOfTotalNodeMemory = deployState.featureFlags().heapSizePercentage() > 0 ?
                Math.min(99, deployState.featureFlags().heapSizePercentage()) :
                defaultHeapSizePercentageOfTotalNodeMemory;
    }

    // Registers each component of the application package as a file reference and remembers it
    // for getConfig(ApplicationBundlesConfig.Builder).
    // NOTE(review): @Override on a private method does not compile in Java; this looks like an
    // artifact of how this excerpt was assembled — verify against the full source file.
    @Override
    private void addAndSendApplicationBundles(DeployState deployState) {
        for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) {
            FileReference reference = deployState.getFileRegistry().addFile(component.getPathRelativeToAppDir());
            applicationBundles.add(reference);
        }
    }

    // Distributes user-configured files referenced by any component to the cluster's containers.
    private void sendUserConfiguredFiles(DeployState deployState) {
        FileSender fileSender = new FileSender(containers, deployState.getFileRegistry(), deployState.getDeployLogger());
        for (Component<?, ?> component : getAllComponents()) {
            fileSender.sendUserConfiguredFiles(component);
        }
    }

    private void addMetricsHandlers() {
        addMetricsHandler(METRICS_V2_HANDLER_CLASS, METRICS_V2_HANDLER_BINDING_1, METRICS_V2_HANDLER_BINDING_2);
        addMetricsHandler(PROMETHEUS_V1_HANDLER_CLASS, PROMETHEUS_V1_HANDLER_BINDING_1, PROMETHEUS_V1_HANDLER_BINDING_2);
    }

    // Adds a handler component serving the given root and wildcard bindings.
    private void addMetricsHandler(String handlerClass, BindingPattern rootBinding, BindingPattern innerBinding) {
        Handler handler = new Handler(
                new ComponentModel(handlerClass, null, null, null));
        handler.addServerBindings(rootBinding, innerBinding);
        addComponent(handler);
    }

    // Adds the test-runner platform bundles, but only for hosted tester instances.
    private void addTestrunnerComponentsIfTester(DeployState deployState) {
        if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester()) {
            addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-testrunner-components"));
            addPlatformBundle(PlatformBundles.absoluteBundlePath("vespa-osgi-testrunner"));
            addPlatformBundle(PlatformBundles.absoluteBundlePath("tenant-cd-api"));
            if(deployState.zone().system().isPublic()) {
                addPlatformBundle(PlatformBundles.absoluteBundlePath("cloud-tenant-cd"));
            }
        }
    }

    public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) { this.modelEvaluation = modelEvaluation; }

    public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage; }

    /**
     * Returns the JVM heap percentage to use: the explicitly configured value if set; otherwise,
     * on hosted Vespa, the combined-cluster value when this cluster shares a host cluster and the
     * flag-derived default otherwise; empty when self-hosted and unconfigured.
     */
    @Override
    public Optional<Integer> getMemoryPercentage() {
        if (memoryPercentage != null) {
            return Optional.of(memoryPercentage);
        } else if (isHostedVespa()) {
            return getHostClusterId().isPresent() ?
                    Optional.of(heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster) :
                    Optional.of(heapSizePercentageOfTotalNodeMemory);
        }
        return Optional.empty();
    }

    /** Create list of endpoints, these will be consumed later by LbServicesProducer */
    private void createEndpointList(DeployState deployState) {
        if(!deployState.isHosted()) return;
        if(deployState.getProperties().applicationId().instance().isTester()) return;
        List<ApplicationClusterEndpoint> endpoints = new ArrayList<>();

        // Sort hostnames to keep the produced endpoint list stable across model builds.
        List<String> hosts = getContainers().stream()
                                            .map(AbstractService::getHostName)
                                            .sorted()
                                            .toList();

        // One zone-scoped shared-L4 endpoint per configured zone DNS suffix.
        for (String suffix : deployState.getProperties().zoneDnsSuffixes()) {
            ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom(
                    deployState.zone().system(),
                    ClusterSpec.Id.from(getName()),
                    deployState.getProperties().applicationId(),
                    suffix);
            endpoints.add(ApplicationClusterEndpoint.builder()
                                                    .zoneScope()
                                                    .sharedL4Routing()
                                                    .dnsName(l4Name)
                                                    .hosts(hosts)
                                                    .clusterId(getName())
                                                    .build());
        }

        // Add endpoints assigned by the controller for this cluster, restricted to shared-L4 routing.
        Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints();
        endpointsFromController.stream()
                               .filter(ce -> ce.clusterId().equals(getName()))
                               .filter(ce -> ce.routingMethod() == sharedLayer4)
                               .forEach(ce -> ce.names().forEach(
                                       name -> endpoints.add(ApplicationClusterEndpoint.builder()
                                                                                       .scope(ce.scope())
                                                                                       .weight(Long.valueOf(ce.weight().orElse(1)).intValue())
                                                                                       .routingMethod(ce.routingMethod())
                                                                                       .dnsName(ApplicationClusterEndpoint.DnsName.from(name))
                                                                                       .hosts(hosts)
                                                                                       .clusterId(getName())
                                                                                       .build())
                               ));
        endpointList = List.copyOf(endpoints);
    }

    @Override
    public void getConfig(ApplicationBundlesConfig.Builder builder) {
        applicationBundles.stream().map(FileReference::value)
                          .forEach(builder::bundles);
    }

    // The model-evaluation configs below are only produced when model evaluation is enabled.
    @Override
    public void getConfig(RankProfilesConfig.Builder builder) {
        if (modelEvaluation != null) modelEvaluation.getConfig(builder);
    }

    @Override
    public void getConfig(RankingConstantsConfig.Builder builder) {
        if (modelEvaluation != null) modelEvaluation.getConfig(builder);
    }

    @Override
    public void getConfig(OnnxModelsConfig.Builder builder) {
        if (modelEvaluation != null) modelEvaluation.getConfig(builder);
    }

    public void getConfig(RankingExpressionsConfig.Builder builder) {
        if (modelEvaluation != null) modelEvaluation.getConfig(builder);
    }

    @Override
    public void getConfig(ContainerMbusConfig.Builder builder) {
        // Only set the tuning values that were explicitly given; null means "keep the default".
        if (mbusParams != null) {
            if (mbusParams.maxConcurrentFactor != null)
                builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor);
            if (mbusParams.documentExpansionFactor != null)
                builder.documentExpansionFactor(mbusParams.documentExpansionFactor);
            if (mbusParams.containerCoreMemory != null)
                builder.containerCoreMemory(mbusParams.containerCoreMemory);
        }
        if (getDocproc() != null)
            getDocproc().getConfig(builder);
        builder.transport_events_before_wakeup(transport_events_before_wakeup);
        builder.numconnectionspertarget(transport_connections_per_target);
    }

    @Override
    public void getConfig(MetricsProxyApiConfig.Builder builder) {
        builder.metricsPort(MetricsProxyContainer.BASEPORT)
               .metricsApiPath(ApplicationMetricsHandler.METRICS_VALUES_PATH)
               .prometheusApiPath(ApplicationMetricsHandler.PROMETHEUS_VALUES_PATH);
    }

    @Override
    public void getConfig(QrStartConfig.Builder builder) {
        super.getConfig(builder);
        builder.jvm.verbosegc(true)
                   .availableProcessors(0)
                   .compressedClassSpaceSize(0)
                   .minHeapsize(1536)
                   .heapsize(1536);
        // A configured/derived heap percentage overrides the fixed heap sizes above.
        if (getMemoryPercentage().isPresent()) {
            builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get());
        }
    }

    @Override
    public void getConfig(ZookeeperServerConfig.Builder builder) {
        // Config servers produce their own ZooKeeper config.
        if (getParent() instanceof ConfigserverCluster) return;

        for (Container container : getContainers()) {
            ZookeeperServerConfig.Server.Builder serverBuilder = new ZookeeperServerConfig.Server.Builder();
            // A server is "joining" when there was a previous model and its host was not part of it.
            serverBuilder.hostname(container.getHostName())
                         .id(container.index())
                         .joining( ! previousHosts.isEmpty() &&
                                   ! previousHosts.contains(container.getHostName()))
                         .retired(container.isRetired());
            builder.server(serverBuilder);
        }
        builder.dynamicReconfiguration(true);
    }

    @Override
    public void getConfig(CuratorConfig.Builder builder) {
        super.getConfig(builder);
        if (getParent() instanceof ConfigserverCluster) return;
        builder.zookeeperSessionTimeoutSeconds(zookeeperSessionTimeoutSeconds);
    }

    public Optional<String> getTlsClientAuthority() {
        return tlsClientAuthority;
    }

    public void setMbusParams(MbusParams mbusParams) {
        this.mbusParams = mbusParams;
    }

    public void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; }

    public void setZookeeperSessionTimeoutSeconds(int timeoutSeconds) {
        this.zookeeperSessionTimeoutSeconds = timeoutSeconds;
    }

    protected boolean messageBusEnabled() { return messageBusEnabled; }

    // Adds an MbusServer component nested in the given chain's namespace.
    public void addMbusServer(ComponentId chainId) {
        ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer"));

        addComponent(
                new Component<>(new ComponentModel(new BundleInstantiationSpecification(
                        serviceId,
                        ComponentSpecification.fromString(MbusServerProvider.class.getName()),
                        null))));
    }

    @Override
    public List<ApplicationClusterEndpoint> endpoints() {
        return endpointList;
    }

    @Override
    public String name() { return getName(); }

    /** Message bus tuning parameters; a null field means "use the config default". */
    public static class MbusParams {
        final Double maxConcurrentFactor;
        final Double documentExpansionFactor;
        final Integer containerCoreMemory;

        public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) {
            this.maxConcurrentFactor = maxConcurrentFactor;
            this.documentExpansionFactor = documentExpansionFactor;
            this.containerCoreMemory = containerCoreMemory;
        }
    }

}
If we require that an exclusively allocated node takes up its entire host, the resource-compatibility violation checked here should not be possible — confirm, and simplify the check if so.
/**
 * Returns whether allocating the given candidate on its host would violate exclusivity:
 * either this cluster requires an exclusively owned host the candidate does not satisfy,
 * or the host already carries exclusive nodes belonging to another application.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    // A candidate without a parent host cannot conflict with anything on a host.
    if (candidate.parent.isEmpty()) return false;

    if (nodeRepository.exclusiveAllocation(cluster)) {
        var host = candidate.parent.get();

        // The candidate must consume the host's entire advertised resources (unless unspecified).
        boolean fillsHost = candidate.resources().isUnspecified()
                            || nodeRepository.resourcesCalculator().advertisedResourcesOf(host.flavor()).compatibleWith(candidate.resources());
        if ( ! fillsHost) return true;

        // The host must not be reserved for a different application ...
        if (host.exclusiveToApplicationId().map(owner -> ! owner.equals(application)).orElse(false)) return true;

        // ... nor for a different cluster type.
        return host.exclusiveToClusterType().map(type -> ! type.equals(cluster.type())).orElse(false);
    }

    // Non-exclusive request: reject hosts carrying exclusive nodes owned by other applications.
    for (Node sibling : allNodes.childrenOf(candidate.parentHostname().get())) {
        var allocation = sibling.allocation();
        if (allocation.isEmpty()) continue;
        boolean exclusiveContext = requestedNodes.isExclusive() || allocation.get().membership().cluster().isExclusive();
        if (exclusiveContext && ! allocation.get().owner().equals(application)) return true;
    }
    return false;
}
else {
/**
 * Returns whether allocating the given candidate on its host would violate exclusivity
 * constraints, either because this cluster requires exclusive hosts the candidate does not
 * satisfy, or because the host already carries exclusive nodes of another application.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    // Candidates without a parent host cannot violate host exclusivity.
    if (candidate.parent.isEmpty()) return false;

    if (nodeRepository.exclusiveAllocation(cluster)) {
        var parent = candidate.parent.get();
        // The candidate must take up the host's entire advertised resources (when specified) ...
        if (!candidate.resources().isUnspecified() && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(parent.flavor()).compatibleWith(candidate.resources())) return true;
        // ... and the host must not be reserved for another application ...
        if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
        // ... nor for another cluster type.
        if (parent.exclusiveToClusterType().isPresent() && !parent.exclusiveToClusterType().get().equals(cluster.type())) return true;
        return false;
    } else {
        // Non-exclusive allocation: the host must not already hold exclusive nodes
        // belonging to a different application.
        for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (nodeOnHost.allocation().isEmpty()) continue;
            if (nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
                if (!nodeOnHost.allocation().get().owner().equals(application)) return true;
            }
        }
        return false;
    }
}
/**
 * Builds up the set of nodes allocated to one cluster of one application: candidate nodes are
 * offered via {@link #offer}, accepted/rejected/retired according to the requested node spec,
 * and the resulting node list is produced by {@link #finalNodes}.
 */
class NodeAllocation {

    private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName());

    /** List of all nodes in node-repository */
    private final NodeList allNodes;

    /** The application this list is for */
    private final ApplicationId application;

    /** The cluster this list is for */
    private final ClusterSpec cluster;

    /** The requested nodes of this list */
    private final NodeSpec requestedNodes;

    /** The node candidates this has accepted so far, keyed on hostname */
    private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();

    /** The number of already allocated nodes accepted and not retired */
    private int accepted = 0;

    /** The number of already allocated nodes accepted and not retired and not needing resize */
    private int acceptedWithoutResizingRetired = 0;

    /** The number of nodes rejected because of clashing parentHostname */
    private int rejectedDueToClashingParentHost = 0;

    /** The number of nodes rejected due to exclusivity constraints */
    private int rejectedDueToExclusivity = 0;

    /** The number of nodes rejected because their real resources were below the required minimum */
    private int rejectedDueToInsufficientRealResources = 0;

    /** The number of nodes that just now was changed to retired */
    private int wasRetiredJustNow = 0;

    /** The node indexes to verify uniqueness of each members index */
    private final Set<Integer> indexes = new HashSet<>();

    /** The next membership index to assign to a new node */
    private final Supplier<Integer> nextIndex;

    private final NodeRepository nodeRepository;
    private final NodeResourceLimits nodeResourceLimits;

    /** Host flavor required by feature flag for this application/cluster, if any (blank means none). */
    private final Optional<String> requiredHostFlavor;

    NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
                   Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
        this.allNodes = allNodes;
        this.application = application;
        this.cluster = cluster;
        this.requestedNodes = requestedNodes;
        this.nextIndex = nextIndex;
        this.nodeRepository = nodeRepository;
        this.nodeResourceLimits = new NodeResourceLimits(nodeRepository);
        // Resolve the HOST_FLAVOR flag for this application/cluster; a blank value means no requirement.
        this.requiredHostFlavor = Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource())
                                                                        .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                                                                        .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
                                                                        .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
                                                                        .value())
                                          .filter(s -> !s.isBlank());
    }

    /**
     * Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
     * an allocation to this cluster, or no current allocation (in which case one is assigned).
     *
     * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
     * reject allocated nodes due to index duplicates.
     *
     * @param candidates the nodes which are potentially on offer. These may belong to a different application etc.
     */
    void offer(List<NodeCandidate> candidates) {
        for (NodeCandidate candidate : candidates) {
            if (candidate.allocation().isPresent()) {
                // Already allocated: only accept nodes already belonging to this application/cluster,
                // skipping those that are removable, failing, index-clashing or in the wrong cloud account.
                Allocation allocation = candidate.allocation().get();
                ClusterMembership membership = allocation.membership();
                if ( ! allocation.owner().equals(application)) continue;
                if ( ! membership.cluster().satisfies(cluster)) continue;
                if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
                if ( candidate.state() == Node.State.active && allocation.removable()) continue;
                if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue;
                if ( indexes.contains(membership.index())) continue;
                if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue;

                boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
                boolean acceptToRetire = acceptToRetire(candidate);

                if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
                    candidate = candidate.withNode();
                    if (candidate.isValid())
                        acceptNode(candidate, shouldRetire(candidate, candidates), resizeable);
                }
            }
            else if (! saturated() && hasCompatibleResources(candidate)) {
                // Unallocated: check constraints and allocate it to this cluster if they hold.
                if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) {
                    ++rejectedDueToInsufficientRealResources;
                    continue;
                }
                if ( violatesParentHostPolicy(candidate)) {
                    ++rejectedDueToClashingParentHost;
                    continue;
                }
                if ( violatesExclusivity(candidate)) {
                    ++rejectedDueToExclusivity;
                    continue;
                }
                if (candidate.wantToRetire()) {
                    continue;
                }
                candidate = candidate.allocate(application,
                                               ClusterMembership.from(cluster, nextIndex.get()),
                                               requestedNodes.resources().orElse(candidate.resources()),
                                               nodeRepository.clock().instant());
                if (candidate.isValid()) {
                    acceptNode(candidate, Retirement.none, false);
                }
            }
        }
    }

    /** Returns the cause of retirement for given candidate */
    private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
        if ( ! requestedNodes.considerRetiring()) {
            // Not considering retirement: only preserve an existing retired status.
            boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
            return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
        }
        if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
        if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
        if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
        if (candidate.wantToRetire()) return Retirement.hardRequest;
        if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
        if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity;
        if (requiredHostFlavor.isPresent() && ! candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor;
        return Retirement.none;
    }

    private boolean violatesParentHostPolicy(NodeCandidate candidate) {
        return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
    }

    // The clashing-parent-host policy only applies to production in the main system, excluding testers.
    private boolean checkForClashingParentHost() {
        return nodeRepository.zone().system() == SystemName.main &&
               nodeRepository.zone().environment().isProduction() &&
               ! application.instance().isTester();
    }

    private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
        for (NodeCandidate acceptedNode : nodes.values()) {
            if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
                acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns whether allocating the candidate on this host would violate exclusivity constraints.
     * Note that while we currently require that exclusive allocations uses the entire host,
     * this method also handles the case where smaller exclusive nodes are allocated on it.
     *
     * NOTE(review): the method body this documents is not present in this excerpt of the file;
     * verify against the full source.
     */

    /**
     * Returns whether this node should be accepted into the cluster even if it is not currently desired
     * (already enough nodes, or wrong resources, etc.).
     * Such nodes will be marked retired during finalization of the list of accepted nodes.
     * The conditions for this are:
     *
     * This is a stateful node. These must always be retired before being removed to allow the cluster to
     * migrate away data.
     *
     * This is a container node and it is not desired due to having the wrong flavor. In this case this
     * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
     * be used to avoid removing all the current nodes at once, before the newly allocated replacements are
     * initialized. (In the other case, where a container node is not desired because we have enough nodes we
     * do want to remove it immediately to get immediate feedback on how the size reduction works out.)
     */
    private boolean acceptToRetire(NodeCandidate candidate) {
        if (candidate.state() != Node.State.active) return false;
        if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
        if (candidate.allocation().get().membership().retired()) return true;
        if (! requestedNodes.considerRetiring()) return false;

        return cluster.isStateful() ||
               (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
    }

    private boolean hasCompatibleResources(NodeCandidate candidate) {
        return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable;
    }

    // Accepts the candidate into this allocation, updating counters, resizing and retiring as required,
    // and returns the accepted node.
    private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
        Node node = candidate.toNode();

        if (node.allocation().isPresent()) // Record the currently requested resources
            node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));

        if (retirement == Retirement.none) {
            accepted++;
            // Count nodes not needing resize while retired separately; used by saturated().
            if (node.allocation().isEmpty()
                || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
                acceptedWithoutResizingRetired++;

            // Resize now, unless the node is retired (then resizing happens on unretire, see finalNodes()).
            if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) {
                node = resize(node);
            }

            if (node.state() != Node.State.active) // reactivated node - wipe state that deactivated it
                node = node.unretire().removable(false);
        } else {
            LOG.info("Retiring " + node + " because " + retirement.description());
            ++wasRetiredJustNow;
            node = node.retire(nodeRepository.clock().instant());
        }
        if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
            // group may be different
            node = setCluster(cluster, node);
        }
        candidate = candidate.withNode(node);
        indexes.add(node.allocation().get().membership().index());
        nodes.put(node.hostname(), candidate);
        return node;
    }

    // Resizes the node to the requested resources, keeping the host's disk speed, storage type and architecture.
    private Node resize(Node node) {
        NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
        return node.with(new Flavor(requestedNodes.resources().get()
                                                  .with(hostResources.diskSpeed())
                                                  .with(hostResources.storageType())
                                                  .with(hostResources.architecture())),
                         Agent.application, nodeRepository.clock().instant());
    }

    private Node setCluster(ClusterSpec cluster, Node node) {
        ClusterMembership membership = node.allocation().get().membership().with(cluster);
        return node.with(node.allocation().get().with(membership));
    }

    /** Returns true if no more nodes are needed in this list */
    private boolean saturated() {
        return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
    }

    /** Returns true if the content of this list is sufficient to meet the request */
    boolean fulfilled() {
        return requestedNodes.fulfilledBy(accepted());
    }

    /** Returns true if this allocation was already fulfilled and resulted in no new changes */
    public boolean fulfilledAndNoChanges() {
        return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
    }

    /**
     * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
     *
     * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the
     *         flavor and host count required to cover the deficit.
     */
    Optional<HostDeficit> hostDeficit() {
        if (nodeType().isHost()) {
            return Optional.empty(); // Hosts are provisioned as required by the child application
        }
        return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified),
                                           requestedNodes.fulfilledDeficitCount(accepted())))
                       .filter(hostDeficit -> hostDeficit.count() > 0);
    }

    /** Returns the indices to use when provisioning hosts for this */
    List<Integer> provisionIndices(int count) {
        if (count < 1) throw new IllegalArgumentException("Count must be positive");
        NodeType hostType = requestedNodes.type().hostType();
        if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);

        // Infrastructure hosts: pick the lowest indices not already in use by hosts of this type.
        Set<Integer> currentIndices = allNodes.nodeType(hostType)
                                              .hostnames()
                                              .stream()
                                              .map(NodeAllocation::parseIndex)
                                              .collect(Collectors.toSet());
        List<Integer> indices = new ArrayList<>(count);
        for (int i = 1; indices.size() < count; i++) {
            if (!currentIndices.contains(i)) {
                indices.add(i);
            }
        }
        // Never allocate this host's own index (removes by value, not by position, since myIndex is an Integer).
        Integer myIndex = parseIndex(HostName.getLocalhost());
        indices.remove(myIndex);
        return indices;
    }

    /** The node type this is allocating */
    NodeType nodeType() {
        return requestedNodes.type();
    }

    /**
     * Make the number of <i>non-retired</i> nodes in the list equal to the requested number
     * of nodes, and retire the rest of the list. Only retire currently active nodes.
     * Prefer to retire nodes of the wrong flavor.
     * Make as few changes to the retired set as possible.
     *
     * @return the final list of nodes
     */
    List<Node> finalNodes() {
        int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count();
        int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
        int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);

        if (deltaRetiredCount > 0) { // retire until deltaRetiredCount is 0
            for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
                if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
                    candidate = candidate.withNode();
                    candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (--deltaRetiredCount == 0) break;
                }
            }
        }
        else if (deltaRetiredCount < 0) { // unretire until deltaRetiredCount is 0
            for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
                if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) {
                    candidate = candidate.withNode();
                    if (candidate.isResizable)
                        candidate = candidate.withNode(resize(candidate.toNode()));
                    candidate = candidate.withNode(candidate.toNode().unretire());
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (++deltaRetiredCount == 0) break;
                }
            }
        }

        // Stamp the exclusivity of the request onto every accepted node's cluster membership.
        for (NodeCandidate candidate : nodes.values()) {
            candidate = candidate.withNode();
            Allocation allocation = candidate.allocation().get();
            candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
                            .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
            nodes.put(candidate.toNode().hostname(), candidate);
        }

        return nodes.values().stream().map(n -> n.toNode()).toList();
    }

    /** Returns the accepted, pre-existing nodes in a state from which they can be reserved. */
    List<Node> reservableNodes() {
        EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
        return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
    }

    /** Returns the accepted nodes that were newly created by this allocation. */
    List<Node> newNodes() {
        return nodesFilter(n -> n.isNew);
    }

    private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
        return nodes.values().stream()
                    .filter(predicate)
                    .map(n -> n.toNode())
                    .toList();
    }

    /** Returns the number of nodes accepted this far */
    private int accepted() {
        if (nodeType() == NodeType.tenant) return accepted;
        // For host types, count all nodes of the type in the repository, not just those offered here.
        return allNodes.nodeType(nodeType()).size();
    }

    /** Prefer to retire nodes we want the least */
    private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream().sorted(Comparator.reverseOrder()).toList();
    }

    /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
    private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream()
                         .sorted(Comparator.comparing(NodeCandidate::wantToRetire)
                                           .thenComparing(n -> n.allocation().get().membership().index()))
                         .toList();
    }

    /** Returns a human-readable summary of why this allocation could not be fulfilled, or "" if it could. */
    public String allocationFailureDetails() {
        List<String> reasons = new ArrayList<>();
        if (rejectedDueToExclusivity > 0)
            reasons.add("host exclusivity constraints");
        if (rejectedDueToClashingParentHost > 0)
            reasons.add("insufficient nodes available on separate physical hosts");
        if (wasRetiredJustNow > 0)
            reasons.add("retirement of allocated nodes");
        if (rejectedDueToInsufficientRealResources > 0)
            reasons.add("insufficient real resources on hosts");
        if (reasons.isEmpty()) return "";
        return ": Not enough suitable nodes available due to " + String.join(", ", reasons);
    }

    // Extracts the numeric index embedded in an infrastructure hostname, e.g. "cfg3.example" -> 3.
    private static Integer parseIndex(String hostname) {
        try {
            return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
        }
    }

    /** Possible causes of node retirement */
    private enum Retirement {

        alreadyRetired("node is already retired"),
        outsideRealLimits("node real resources is outside limits"),
        violatesParentHostPolicy("node violates parent host policy"),
        incompatibleResources("node resources are incompatible"),
        hardRequest("node is requested to retire"),
        softRequest("node is requested to retire (soft)"),
        violatesExclusivity("node violates host exclusivity"),
        violatesHostFlavor("node violates host flavor"),
        none("");

        private final String description;

        Retirement(String description) {
            this.description = description;
        }

        /** Human readable description of this cause */
        public String description() {
            return description;
        }

    }

    /** A host deficit, the number of missing hosts, for a deployment */
    static class HostDeficit {

        private final NodeResources resources;
        private final int count;

        private HostDeficit(NodeResources resources, int count) {
            this.resources = resources;
            this.count = count;
        }

        NodeResources resources() {
            return resources;
        }

        int count() {
            return count;
        }

        @Override
        public String toString() {
            return "deficit of " + count + " nodes with " + resources;
        }

    }

}
/**
 * Builds up the set of nodes to allocate for one cluster of one application, by being offered
 * candidate nodes through {@link #offer(List)} and accepting/rejecting/retiring them.
 * Not thread-safe; intended for single-threaded use within one allocation pass.
 *
 * NOTE(review): this excerpt declares the javadoc for violatesExclusivity but its body is defined
 * elsewhere — the method is referenced from offer() and shouldRetire() below.
 */
class NodeAllocation {

    private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName());

    /** List of all nodes in node-repository */
    private final NodeList allNodes;

    /** The application this list is for */
    private final ApplicationId application;

    /** The cluster this list is for */
    private final ClusterSpec cluster;

    /** The requested nodes of this list */
    private final NodeSpec requestedNodes;

    /** The node candidates this has accepted so far, keyed on hostname */
    private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();

    /** The number of already allocated nodes accepted and not retired */
    private int accepted = 0;

    /** The number of already allocated nodes accepted and not retired and not needing resize */
    private int acceptedWithoutResizingRetired = 0;

    /** The number of nodes rejected because of clashing parentHostname */
    private int rejectedDueToClashingParentHost = 0;

    /** The number of nodes rejected due to exclusivity constraints */
    private int rejectedDueToExclusivity = 0;

    /** The number of nodes rejected because their real resources fell outside allowed limits */
    private int rejectedDueToInsufficientRealResources = 0;

    /** The number of nodes that just now was changed to retired */
    private int wasRetiredJustNow = 0;

    /** The node indexes to verify uniqueness of each members index */
    private final Set<Integer> indexes = new HashSet<>();

    /** The next membership index to assign to a new node */
    private final Supplier<Integer> nextIndex;

    private final NodeRepository nodeRepository;
    private final NodeResourceLimits nodeResourceLimits;
    /** Host flavor required via feature flag, if any; blank flag values mean "no requirement" */
    private final Optional<String> requiredHostFlavor;

    NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
                   Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
        this.allNodes = allNodes;
        this.application = application;
        this.cluster = cluster;
        this.requestedNodes = requestedNodes;
        this.nextIndex = nextIndex;
        this.nodeRepository = nodeRepository;
        this.nodeResourceLimits = new NodeResourceLimits(nodeRepository);
        // Resolve the HOST_FLAVOR flag for this application/cluster; a blank value is treated as unset
        this.requiredHostFlavor = Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource())
                                                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                                                            .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
                                                            .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
                                                            .value())
                                          .filter(s -> !s.isBlank());
    }

    /**
     * Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
     * an allocation to this cluster, or no current allocation (in which case one is assigned).
     *
     * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
     * reject allocated nodes due to index duplicates.
     *
     * @param candidates the nodes which are potentially on offer. These may belong to a different application etc.
     */
    void offer(List<NodeCandidate> candidates) {
        for (NodeCandidate candidate : candidates) {
            if (candidate.allocation().isPresent()) {
                // Already allocated: keep it only if it belongs to this application/cluster and is usable
                Allocation allocation = candidate.allocation().get();
                ClusterMembership membership = allocation.membership();
                if ( ! allocation.owner().equals(application)) continue; // wrong application
                if ( ! membership.cluster().satisfies(cluster)) continue; // wrong cluster id/type
                if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; // wrong group, and we can't or have no reason to change it
                if ( candidate.state() == Node.State.active && allocation.removable()) continue; // don't accept; causes removal
                if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; // don't accept; causes failing
                if ( indexes.contains(membership.index())) continue; // duplicate index (just to be sure)
                if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; // wrong cloud account

                boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
                boolean acceptToRetire = acceptToRetire(candidate);

                if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
                    candidate = candidate.withNode();
                    if (candidate.isValid())
                        acceptNode(candidate, shouldRetire(candidate, candidates), resizeable);
                }
            }
            else if (! saturated() && hasCompatibleResources(candidate)) {
                // Unallocated: allocate to this cluster unless a policy rejects it
                if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) {
                    ++rejectedDueToInsufficientRealResources;
                    continue;
                }
                if ( violatesParentHostPolicy(candidate)) {
                    ++rejectedDueToClashingParentHost;
                    continue;
                }
                if ( violatesExclusivity(candidate)) {
                    ++rejectedDueToExclusivity;
                    continue;
                }
                if (candidate.wantToRetire()) {
                    continue;
                }
                candidate = candidate.allocate(application,
                                               ClusterMembership.from(cluster, nextIndex.get()),
                                               requestedNodes.resources().orElse(candidate.resources()),
                                               nodeRepository.clock().instant());
                if (candidate.isValid()) {
                    acceptNode(candidate, Retirement.none, false);
                }
            }
        }
    }

    /** Returns the cause of retirement for given candidate */
    private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
        if ( ! requestedNodes.considerRetiring()) {
            // Retiring disabled for this request: only report nodes that are already retired
            boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
            return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
        }
        if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
        if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
        if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
        if (candidate.wantToRetire()) return Retirement.hardRequest;
        if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
        if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity;
        if (requiredHostFlavor.isPresent() && ! candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor;
        return Retirement.none;
    }

    private boolean violatesParentHostPolicy(NodeCandidate candidate) {
        return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
    }

    /** Parent-host clash is only enforced in main system production, and never for tester instances */
    private boolean checkForClashingParentHost() {
        return nodeRepository.zone().system() == SystemName.main &&
               nodeRepository.zone().environment().isProduction() &&
               ! application.instance().isTester();
    }

    private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
        for (NodeCandidate acceptedNode : nodes.values()) {
            if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
                acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns whether allocating the candidate on this host would violate exclusivity constraints.
     * Note that while we currently require that exclusive allocations uses the entire host,
     * this method also handles the case where smaller exclusive nodes are allocated on it.
     */
    // NOTE(review): the body of violatesExclusivity is defined elsewhere in this file.

    /**
     * Returns whether this node should be accepted into the cluster even if it is not currently desired
     * (already enough nodes, or wrong resources, etc.).
     * Such nodes will be marked retired during finalization of the list of accepted nodes.
     * The conditions for this are:
     *
     * This is a stateful node. These must always be retired before being removed to allow the cluster to
     * migrate away data.
     *
     * This is a container node and it is not desired due to having the wrong flavor. In this case this
     * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
     * be used to avoid removing all the current nodes at once, before the newly allocated replacements are
     * initialized. (In the other case, where a container node is not desired because we have enough nodes we
     * do want to remove it immediately to get immediate feedback on how the size reduction works out.)
     */
    private boolean acceptToRetire(NodeCandidate candidate) {
        if (candidate.state() != Node.State.active) return false;
        if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
        if (candidate.allocation().get().membership().retired()) return true; // don't second-guess if already retired
        if (! requestedNodes.considerRetiring()) return false;

        return cluster.isStateful() ||
               (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
    }

    private boolean hasCompatibleResources(NodeCandidate candidate) {
        return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable;
    }

    /** Accepts the candidate into {@link #nodes}, retiring, resizing or unretiring it as decided */
    private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
        Node node = candidate.toNode();

        if (node.allocation().isPresent()) // Record the currently requested resources
            node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));

        if (retirement == Retirement.none) {
            accepted++;
            // We only count nodes that are to be used as of now, not retired resized nodes
            if (node.allocation().isEmpty()
                || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
                acceptedWithoutResizingRetired++;

            if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) {
                node = resize(node);
            }

            if (node.state() != Node.State.active) // reactivated node - wipe state that deactivated it
                node = node.unretire().removable(false);
        }
        else {
            LOG.info("Retiring " + node + " because " + retirement.description());
            ++wasRetiredJustNow;
            node = node.retire(nodeRepository.clock().instant());
        }
        if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
            // group may be different
            node = setCluster(cluster, node);
        }
        candidate = candidate.withNode(node);
        indexes.add(node.allocation().get().membership().index());
        nodes.put(node.hostname(), candidate);
        return node;
    }

    /** Resizes the node to the requested resources, keeping the host's disk speed/storage/architecture */
    private Node resize(Node node) {
        NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
        return node.with(new Flavor(requestedNodes.resources().get()
                                                  .with(hostResources.diskSpeed())
                                                  .with(hostResources.storageType())
                                                  .with(hostResources.architecture())),
                         Agent.application, nodeRepository.clock().instant());
    }

    private Node setCluster(ClusterSpec cluster, Node node) {
        ClusterMembership membership = node.allocation().get().membership().with(cluster);
        return node.with(node.allocation().get().with(membership));
    }

    /** Returns true if no more nodes are needed in this list */
    private boolean saturated() {
        return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
    }

    /** Returns true if the content of this list is sufficient to meet the request */
    boolean fulfilled() {
        return requestedNodes.fulfilledBy(accepted());
    }

    /** Returns true if this allocation was already fulfilled and resulted in no new changes */
    public boolean fulfilledAndNoChanges() {
        return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
    }

    /**
     * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
     *
     * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the
     * flavor and host count required to cover the deficit.
     */
    Optional<HostDeficit> hostDeficit() {
        if (nodeType().isHost()) {
            return Optional.empty(); // hosts are provisioned as required by the child application
        }
        return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified),
                                           requestedNodes.fulfilledDeficitCount(accepted())))
                       .filter(hostDeficit -> hostDeficit.count() > 0);
    }

    /** Returns the indices to use when provisioning hosts for this */
    List<Integer> provisionIndices(int count) {
        if (count < 1) throw new IllegalArgumentException("Count must be positive");
        NodeType hostType = requestedNodes.type().hostType();
        if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);

        // Infrastructure hosts: find the lowest indices not already in use, parsed from hostnames
        Set<Integer> currentIndices = allNodes.nodeType(hostType)
                                              .hostnames()
                                              .stream()
                                              .map(NodeAllocation::parseIndex)
                                              .collect(Collectors.toSet());
        List<Integer> indices = new ArrayList<>(count);
        for (int i = 1; indices.size() < count; i++) {
            if (!currentIndices.contains(i)) {
                indices.add(i);
            }
        }
        // Never allocate the index of this config server/controller host, to avoid a conflicting hostname
        Integer myIndex = parseIndex(HostName.getLocalhost());
        indices.remove(myIndex);
        return indices;
    }

    /** The node type this is allocating */
    NodeType nodeType() {
        return requestedNodes.type();
    }

    /**
     * Make the number of <i>non-retired</i> nodes in the list equal to the requested number
     * of nodes, and retire the rest of the list. Only retire currently active nodes.
     * Prefer to retire nodes of the wrong flavor.
     * Make as few changes to the retired set as possible.
     *
     * @return the final list of nodes
     */
    List<Node> finalNodes() {
        int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count();
        int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
        int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);

        if (deltaRetiredCount > 0) { // retire until deltaRetiredCount is 0
            for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
                if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
                    candidate = candidate.withNode();
                    candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (--deltaRetiredCount == 0) break;
                }
            }
        }
        else if (deltaRetiredCount < 0) { // unretire until deltaRetiredCount is 0
            for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
                if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) {
                    candidate = candidate.withNode();
                    if (candidate.isResizable)
                        candidate = candidate.withNode(resize(candidate.toNode()));
                    candidate = candidate.withNode(candidate.toNode().unretire());
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (++deltaRetiredCount == 0) break;
                }
            }
        }

        // Stamp the request's exclusivity onto every accepted node's cluster membership
        for (NodeCandidate candidate : nodes.values()) {
            candidate = candidate.withNode();
            Allocation allocation = candidate.allocation().get();
            candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
                            .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
            nodes.put(candidate.toNode().hostname(), candidate);
        }

        return nodes.values().stream().map(n -> n.toNode()).toList();
    }

    List<Node> reservableNodes() {
        // Include already reserved nodes to extend reservation period and to potentially update their cluster spec.
        EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
        return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
    }

    List<Node> newNodes() {
        return nodesFilter(n -> n.isNew);
    }

    private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
        return nodes.values().stream()
                    .filter(predicate)
                    .map(n -> n.toNode())
                    .toList();
    }

    /** Returns the number of nodes accepted this far */
    private int accepted() {
        if (nodeType() == NodeType.tenant) return accepted;
        // For host types, count all nodes of that type in the repository instead
        return allNodes.nodeType(nodeType()).size();
    }

    /** Prefer to retire nodes we want the least */
    private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream().sorted(Comparator.reverseOrder()).toList();
    }

    /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
    private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream()
                         .sorted(Comparator.comparing(NodeCandidate::wantToRetire)
                                           .thenComparing(n -> n.allocation().get().membership().index()))
                         .toList();
    }

    /** Returns a human-readable summary of rejection reasons, or "" if nothing was rejected */
    public String allocationFailureDetails() {
        List<String> reasons = new ArrayList<>();
        if (rejectedDueToExclusivity > 0)
            reasons.add("host exclusivity constraints");
        if (rejectedDueToClashingParentHost > 0)
            reasons.add("insufficient nodes available on separate physical hosts");
        if (wasRetiredJustNow > 0)
            reasons.add("retirement of allocated nodes");
        if (rejectedDueToInsufficientRealResources > 0)
            reasons.add("insufficient real resources on hosts");
        if (reasons.isEmpty()) return "";
        return ": Not enough suitable nodes available due to " + String.join(", ", reasons);
    }

    /** Extracts the numeric index from an infrastructure hostname, e.g. "cfg3.example" -> 3 */
    private static Integer parseIndex(String hostname) {
        try {
            return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
        }
    }

    /** Possible causes of node retirement */
    private enum Retirement {

        alreadyRetired("node is already retired"),
        outsideRealLimits("node real resources is outside limits"),
        violatesParentHostPolicy("node violates parent host policy"),
        incompatibleResources("node resources are incompatible"),
        hardRequest("node is requested to retire"),
        softRequest("node is requested to retire (soft)"),
        violatesExclusivity("node violates host exclusivity"),
        violatesHostFlavor("node violates host flavor"),
        none("");

        private final String description;

        Retirement(String description) {
            this.description = description;
        }

        /** Human readable description of this cause */
        public String description() { return description; }

    }

    /** A host deficit, the number of missing hosts, for a deployment */
    static class HostDeficit {

        private final NodeResources resources;
        private final int count;

        private HostDeficit(NodeResources resources, int count) {
            this.resources = resources;
            this.count = count;
        }

        NodeResources resources() { return resources; }

        int count() { return count; }

        @Override
        public String toString() {
            return "deficit of " + count + " nodes with " + resources;
        }

    }

}
If we require exclusivity, our host should already have exclusiveToApplicationId set — so a host where it is empty is not provably exclusive to this application and should also be rejected. This should be ```suggestion if (parent.exclusiveToApplicationId().isEmpty() || !parent.exclusiveToApplicationId().get().equals(application)) return true; ```
/**
 * Returns whether allocating the candidate on its host would violate exclusivity constraints.
 *
 * Fix: the else branch previously also triggered on {@code requestedNodes.isExclusive()}, but that
 * branch is only reached when {@code nodeRepository.exclusiveAllocation(cluster)} is false, i.e. this
 * allocation is not exclusive — presumably making that disjunct redundant there; it is removed so the
 * non-exclusive branch only rejects hosts carrying exclusive nodes owned by another application.
 *
 * @param candidate the candidate node, possibly with a parent host
 * @return true if accepting this candidate would violate exclusivity
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false;

    if (nodeRepository.exclusiveAllocation(cluster)) {
        var parent = candidate.parent.get();
        // The host's advertised resources must fit the requested resources exactly (whole-host use)
        if (!candidate.resources().isUnspecified()
            && !nodeRepository.resourcesCalculator().advertisedResourcesOf(parent.flavor()).compatibleWith(candidate.resources()))
            return true;
        // NOTE(review): an exclusively provisioned host should have exclusiveToApplicationId set;
        // consider also rejecting hosts where it is empty — TODO confirm no current hosts rely on this.
        if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
        if (parent.exclusiveToClusterType().isPresent() && !parent.exclusiveToClusterType().get().equals(cluster.type())) return true;
        return false;
    }
    else {
        // Non-exclusive allocation: reject the host if any node on it belongs to an exclusive
        // cluster of a different application
        for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (nodeOnHost.allocation().isEmpty()) continue;
            if (nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
                if (!nodeOnHost.allocation().get().owner().equals(application)) return true;
            }
        }
        return false;
    }
}
if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
/**
 * Returns whether allocating the candidate on its host would violate exclusivity constraints.
 * For exclusive allocations the host must match the requested resources and any exclusivity tags;
 * otherwise the host must not already carry exclusive nodes of a different application.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false;

    if (nodeRepository.exclusiveAllocation(cluster)) {
        var host = candidate.parent.get();
        boolean resourceMismatch = ! candidate.resources().isUnspecified()
                                   && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(host.flavor()).compatibleWith(candidate.resources());
        boolean wrongApplication = host.exclusiveToApplicationId().isPresent()
                                   && ! host.exclusiveToApplicationId().get().equals(application);
        boolean wrongClusterType = host.exclusiveToClusterType().isPresent()
                                   && ! host.exclusiveToClusterType().get().equals(cluster.type());
        return resourceMismatch || wrongApplication || wrongClusterType;
    }

    // Non-exclusive allocation: any sibling in an exclusive cluster owned by another application is a violation
    for (var sibling : allNodes.childrenOf(candidate.parentHostname().get())) {
        if (sibling.allocation().isEmpty()) continue;
        var allocation = sibling.allocation().get();
        if (allocation.membership().cluster().isExclusive() && ! allocation.owner().equals(application))
            return true;
    }
    return false;
}
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
/**
 * Manages the list of nodes offered to a deployment during allocation: accepts or rejects each
 * candidate against the requested {@link NodeSpec}, tracks rejection reasons, and finalizes the
 * list by retiring/unretiring nodes as needed.
 */
class NodeAllocation {

    private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName());

    /** List of all nodes in node-repository */
    private final NodeList allNodes;

    /** The application this list is for */
    private final ApplicationId application;

    /** The cluster this list is for */
    private final ClusterSpec cluster;

    /** The requested nodes of this list */
    private final NodeSpec requestedNodes;

    /** The node candidates this has accepted so far, keyed on hostname */
    private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();

    /** The number of already allocated nodes accepted and not retired */
    private int accepted = 0;

    /** The number of already allocated nodes accepted and not retired and not needing resize */
    private int acceptedWithoutResizingRetired = 0;

    /** The number of nodes rejected because of clashing parentHostname */
    private int rejectedDueToClashingParentHost = 0;

    /** The number of nodes rejected due to exclusivity constraints */
    private int rejectedDueToExclusivity = 0;

    /** The number of nodes rejected because their real resources fell outside the allowed limits */
    private int rejectedDueToInsufficientRealResources = 0;

    /** The number of nodes that just now was changed to retired */
    private int wasRetiredJustNow = 0;

    /** The node indexes to verify uniqueness of each members index */
    private final Set<Integer> indexes = new HashSet<>();

    /** The next membership index to assign to a new node */
    private final Supplier<Integer> nextIndex;

    private final NodeRepository nodeRepository;
    private final NodeResourceLimits nodeResourceLimits;

    /** Host flavor required by feature flag, if any; candidates on other flavors are retired */
    private final Optional<String> requiredHostFlavor;

    NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
                   Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
        this.allNodes = allNodes;
        this.application = application;
        this.cluster = cluster;
        this.requestedNodes = requestedNodes;
        this.nextIndex = nextIndex;
        this.nodeRepository = nodeRepository;
        this.nodeResourceLimits = new NodeResourceLimits(nodeRepository);
        // A blank flag value means "no required flavor", hence the filter
        this.requiredHostFlavor = Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource())
                                                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                                                            .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
                                                            .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
                                                            .value())
                                          .filter(s -> !s.isBlank());
    }

    /**
     * Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
     * an allocation to this cluster, or no current allocation (in which case one is assigned).
     *
     * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
     * reject allocated nodes due to index duplicates.
     *
     * @param candidates the nodes which are potentially on offer. These may belong to a different application etc.
     */
    void offer(List<NodeCandidate> candidates) {
        for (NodeCandidate candidate : candidates) {
            if (candidate.allocation().isPresent()) {
                // Already-allocated candidate: only accept if it belongs to this application/cluster
                Allocation allocation = candidate.allocation().get();
                ClusterMembership membership = allocation.membership();
                if ( ! allocation.owner().equals(application)) continue; // allocated to another application
                if ( ! membership.cluster().satisfies(cluster)) continue; // wrong cluster id/type
                if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; // wrong group
                if ( candidate.state() == Node.State.active && allocation.removable()) continue; // scheduled for removal
                if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; // scheduled for failing
                if ( indexes.contains(membership.index())) continue; // duplicate membership index
                if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; // wrong cloud account

                boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
                boolean acceptToRetire = acceptToRetire(candidate);

                if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
                    candidate = candidate.withNode();
                    if (candidate.isValid())
                        acceptNode(candidate, shouldRetire(candidate, candidates), resizeable);
                }
            }
            else if (! saturated() && hasCompatibleResources(candidate)) {
                // Unallocated candidate: check policies, then allocate it to this cluster
                if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) {
                    ++rejectedDueToInsufficientRealResources;
                    continue;
                }
                if ( violatesParentHostPolicy(candidate)) {
                    ++rejectedDueToClashingParentHost;
                    continue;
                }
                if ( violatesExclusivity(candidate)) {
                    ++rejectedDueToExclusivity;
                    continue;
                }
                if (candidate.wantToRetire()) {
                    continue;
                }
                candidate = candidate.allocate(application,
                                               ClusterMembership.from(cluster, nextIndex.get()),
                                               requestedNodes.resources().orElse(candidate.resources()),
                                               nodeRepository.clock().instant());
                if (candidate.isValid()) {
                    acceptNode(candidate, Retirement.none, false);
                }
            }
        }
    }

    /** Returns the cause of retirement for given candidate */
    private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
        if ( ! requestedNodes.considerRetiring()) {
            // Retiring is disabled for this request; only report a node that is already retired
            boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
            return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
        }
        if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
        if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
        if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
        if (candidate.wantToRetire()) return Retirement.hardRequest;
        if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
        if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity;
        if (requiredHostFlavor.isPresent() && ! candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor;
        return Retirement.none;
    }

    /** Returns whether the candidate's parent host clashes with that of an already accepted node */
    private boolean violatesParentHostPolicy(NodeCandidate candidate) {
        return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
    }

    /** The parent-host clash check only applies in production zones of the main system, and never for testers */
    private boolean checkForClashingParentHost() {
        return nodeRepository.zone().system() == SystemName.main &&
               nodeRepository.zone().environment().isProduction() &&
               ! application.instance().isTester();
    }

    /** Returns whether an already accepted node shares the candidate's parent hostname */
    private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
        for (NodeCandidate acceptedNode : nodes.values()) {
            if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
                acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns whether allocating the candidate on this host would violate exclusivity constraints.
     * Note that while we currently require that exclusive allocations uses the entire host,
     * this method also handles the case where smaller exclusive nodes are allocated on it.
     */
    // NOTE(review): the method documented by the javadoc above (violatesExclusivity) is referenced
    // from offer() and shouldRetire() but is not present in this chunk — confirm it is defined
    // elsewhere in this class.

    /**
     * Returns whether this node should be accepted into the cluster even if it is not currently desired
     * (already enough nodes, or wrong resources, etc.).
     * Such nodes will be marked retired during finalization of the list of accepted nodes.
     * The conditions for this are:
     *
     * This is a stateful node. These must always be retired before being removed to allow the cluster to
     * migrate away data.
     *
     * This is a container node and it is not desired due to having the wrong flavor. In this case this
     * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
     * be used to avoid removing all the current nodes at once, before the newly allocated replacements are
     * initialized. (In the other case, where a container node is not desired because we have enough nodes we
     * do want to remove it immediately to get immediate feedback on how the size reduction works out.)
     */
    private boolean acceptToRetire(NodeCandidate candidate) {
        if (candidate.state() != Node.State.active) return false;
        if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
        if (candidate.allocation().get().membership().retired()) return true; // already retired: keep it in the list
        if (! requestedNodes.considerRetiring()) return false;

        return cluster.isStateful() ||
               (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
    }

    /** A candidate's resources are compatible if they match the request, or the node can be resized in place */
    private boolean hasCompatibleResources(NodeCandidate candidate) {
        return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable;
    }

    /** Records the candidate as accepted, applying retirement, resize and cluster updates as required */
    private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
        Node node = candidate.toNode();

        if (node.allocation().isPresent()) // record the currently requested resources on the allocation
            node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));

        if (retirement == Retirement.none) {
            accepted++;

            if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
                acceptedWithoutResizingRetired++;

            // Retired nodes are not resized here; they keep their resources until unretired
            if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) {
                node = resize(node);
            }

            if (node.state() != Node.State.active) // reactivating a node: clear state that deactivated it
                node = node.unretire().removable(false);
        }
        else {
            LOG.info("Retiring " + node + " because " + retirement.description());
            ++wasRetiredJustNow;
            node = node.retire(nodeRepository.clock().instant());
        }
        if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
            // Cluster membership differs (e.g. group changed): update it
            node = setCluster(cluster, node);
        }
        candidate = candidate.withNode(node);
        indexes.add(node.allocation().get().membership().index());
        nodes.put(node.hostname(), candidate);
        return node;
    }

    /** Resizes the node to the requested resources, keeping the host's disk speed, storage type and architecture */
    private Node resize(Node node) {
        NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
        return node.with(new Flavor(requestedNodes.resources().get()
                                                  .with(hostResources.diskSpeed())
                                                  .with(hostResources.storageType())
                                                  .with(hostResources.architecture())),
                         Agent.application, nodeRepository.clock().instant());
    }

    /** Returns the node with its cluster membership replaced by one for the given cluster */
    private Node setCluster(ClusterSpec cluster, Node node) {
        ClusterMembership membership = node.allocation().get().membership().with(cluster);
        return node.with(node.allocation().get().with(membership));
    }

    /** Returns true if no more nodes are needed in this list */
    private boolean saturated() {
        return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
    }

    /** Returns true if the content of this list is sufficient to meet the request */
    boolean fulfilled() {
        return requestedNodes.fulfilledBy(accepted());
    }

    /** Returns true if this allocation was already fulfilled and resulted in no new changes */
    public boolean fulfilledAndNoChanges() {
        return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
    }

    /**
     * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
     *
     * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the
     *         flavor and host count required to cover the deficit.
     */
    Optional<HostDeficit> hostDeficit() {
        if (nodeType().isHost()) {
            return Optional.empty(); // hosts themselves are never provisioned via a deficit
        }
        return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified),
                                           requestedNodes.fulfilledDeficitCount(accepted())))
                       .filter(hostDeficit -> hostDeficit.count() > 0);
    }

    /** Returns the indices to use when provisioning hosts for this */
    List<Integer> provisionIndices(int count) {
        if (count < 1) throw new IllegalArgumentException("Count must be positive");
        NodeType hostType = requestedNodes.type().hostType();
        if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);

        // For non-tenant host types, pick the lowest indices not already parsed from existing hostnames
        Set<Integer> currentIndices = allNodes.nodeType(hostType)
                                              .hostnames()
                                              .stream()
                                              .map(NodeAllocation::parseIndex)
                                              .collect(Collectors.toSet());
        List<Integer> indices = new ArrayList<>(count);
        for (int i = 1; indices.size() < count; i++) {
            if (!currentIndices.contains(i)) {
                indices.add(i);
            }
        }
        // Exclude the index parsed from the local hostname — presumably to avoid this host reusing
        // its own index; TODO confirm the rationale
        Integer myIndex = parseIndex(HostName.getLocalhost());
        indices.remove(myIndex);
        return indices;
    }

    /** The node type this is allocating */
    NodeType nodeType() {
        return requestedNodes.type();
    }

    /**
     * Make the number of <i>non-retired</i> nodes in the list equal to the requested number
     * of nodes, and retire the rest of the list. Only retire currently active nodes.
     * Prefer to retire nodes of the wrong flavor.
     * Make as few changes to the retired set as possible.
     *
     * @return the final list of nodes
     */
    List<Node> finalNodes() {
        int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count();
        int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
        int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);

        if (deltaRetiredCount > 0) { // retire until deltaRetiredCount reaches 0
            for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
                if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
                    candidate = candidate.withNode();
                    candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (--deltaRetiredCount == 0) break;
                }
            }
        }
        else if (deltaRetiredCount < 0) { // unretire until deltaRetiredCount reaches 0
            for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
                if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) {
                    candidate = candidate.withNode();
                    if (candidate.isResizable)
                        candidate = candidate.withNode(resize(candidate.toNode()));
                    candidate = candidate.withNode(candidate.toNode().unretire());
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (++deltaRetiredCount == 0) break;
                }
            }
        }

        // Propagate the requested exclusivity to the cluster membership of every accepted node
        for (NodeCandidate candidate : nodes.values()) {
            candidate = candidate.withNode();
            Allocation allocation = candidate.allocation().get();
            candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
                                 .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
            nodes.put(candidate.toNode().hostname(), candidate);
        }

        return nodes.values().stream().map(n -> n.toNode()).toList();
    }

    /** Returns the accepted nodes that are not new and are in a state where they can be reserved */
    List<Node> reservableNodes() {
        EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
        return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
    }

    /** Returns the accepted nodes that were newly allocated by this */
    List<Node> newNodes() {
        return nodesFilter(n -> n.isNew);
    }

    private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
        return nodes.values().stream()
                    .filter(predicate)
                    .map(n -> n.toNode())
                    .toList();
    }

    /** Returns the number of nodes accepted this far */
    private int accepted() {
        if (nodeType() == NodeType.tenant) return accepted;
        // For host node types, all existing nodes of the type count as accepted
        return allNodes.nodeType(nodeType()).size();
    }

    /** Prefer to retire nodes we want the least */
    private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream().sorted(Comparator.reverseOrder()).toList();
    }

    /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
    private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream()
                         .sorted(Comparator.comparing(NodeCandidate::wantToRetire)
                                           .thenComparing(n -> n.allocation().get().membership().index()))
                         .toList();
    }

    /** Returns a human-readable summary of the rejection reasons recorded during offer(), or "" if none */
    public String allocationFailureDetails() {
        List<String> reasons = new ArrayList<>();
        if (rejectedDueToExclusivity > 0)
            reasons.add("host exclusivity constraints");
        if (rejectedDueToClashingParentHost > 0)
            reasons.add("insufficient nodes available on separate physical hosts");
        if (wasRetiredJustNow > 0)
            reasons.add("retirement of allocated nodes");
        if (rejectedDueToInsufficientRealResources > 0)
            reasons.add("insufficient real resources on hosts");
        if (reasons.isEmpty()) return "";
        return ": Not enough suitable nodes available due to " + String.join(", ", reasons);
    }

    /**
     * Extracts the numeric index embedded in a hostname (first digit run after the leading non-digits).
     *
     * @throws IllegalArgumentException if no index can be parsed from the hostname
     */
    private static Integer parseIndex(String hostname) {
        try {
            return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
        }
    }

    /** Possible causes of node retirement */
    private enum Retirement {

        alreadyRetired("node is already retired"),
        outsideRealLimits("node real resources is outside limits"),
        violatesParentHostPolicy("node violates parent host policy"),
        incompatibleResources("node resources are incompatible"),
        hardRequest("node is requested to retire"),
        softRequest("node is requested to retire (soft)"),
        violatesExclusivity("node violates host exclusivity"),
        violatesHostFlavor("node violates host flavor"),
        none("");

        private final String description;

        Retirement(String description) {
            this.description = description;
        }

        /** Human readable description of this cause */
        public String description() {
            return description;
        }

    }

    /** A host deficit, the number of missing hosts, for a deployment */
    static class HostDeficit {

        private final NodeResources resources;
        private final int count;

        private HostDeficit(NodeResources resources, int count) {
            this.resources = resources;
            this.count = count;
        }

        NodeResources resources() {
            return resources;
        }

        int count() {
            return count;
        }

        @Override
        public String toString() {
            return "deficit of " + count + " nodes with " + resources;
        }

    }

}
Yes, I've tried to explain that in the javadoc. It doesn't hurt that this method is a bit more generic than strictly necessary under today's policy, and in any case we may well have such allocations by the time this change is deployed.
/**
 * Returns whether allocating the candidate on its parent host would violate exclusivity constraints.
 * Covers both directions: a cluster requiring exclusive allocation placed on an unsuitable host,
 * and a node placed on a shared host where exclusivity (of this request or of an existing node)
 * forbids cohabitation with other applications.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false; // no parent host, nothing to violate

    if (nodeRepository.exclusiveAllocation(cluster)) {
        // This cluster requires exclusive allocation: the parent host must fit and be dedicated to us
        var parent = candidate.parent.get();
        if (!candidate.resources().isUnspecified() && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(parent.flavor()).compatibleWith(candidate.resources()))
            return true; // host's advertised resources are incompatible with the requested resources
        if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application))
            return true; // host is exclusive to another application
        if (parent.exclusiveToClusterType().isPresent() && !parent.exclusiveToClusterType().get().equals(cluster.type()))
            return true; // host is exclusive to another cluster type
        return false;
    }
    else {
        // Shared host: refuse if this request, or any existing node on the host, demands exclusivity
        // and the host already carries a node owned by another application
        for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (nodeOnHost.allocation().isEmpty()) continue;
            if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
                if (!nodeOnHost.allocation().get().owner().equals(application)) return true;
            }
        }
        return false;
    }
}
else {
/**
 * Returns whether allocating the candidate on its current parent host would break host
 * exclusivity rules, either because this cluster requires an exclusive host the parent cannot
 * provide, or because an exclusive node of another application already occupies the shared host.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false; // no parent host involved

    if ( ! nodeRepository.exclusiveAllocation(cluster)) {
        // Shared-host case: disallowed only if the host carries an exclusive node owned by someone else
        for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
            var existing = nodeOnHost.allocation();
            if (existing.isEmpty()) continue;
            if ( ! existing.get().membership().cluster().isExclusive()) continue;
            if ( ! existing.get().owner().equals(application)) return true;
        }
        return false;
    }

    // Exclusive-allocation case: the parent host must match the request and not be pinned elsewhere
    var host = candidate.parent.get();
    if ( ! candidate.resources().isUnspecified()
         && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(host.flavor()).compatibleWith(candidate.resources()))
        return true;
    if (host.exclusiveToApplicationId().isPresent() && ! host.exclusiveToApplicationId().get().equals(application))
        return true;
    return host.exclusiveToClusterType().isPresent() && ! host.exclusiveToClusterType().get().equals(cluster.type());
}
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
That we need this PR at all is proof that we are offered nodes on hosts which are not exclusive. Perhaps that should be fixed elsewhere. With the "fills completely" policy there is no need for exclusiveToApplicationId, though, so perhaps we should remove that property (although not in _this_ PR). In any case, I think this method should enforce the actual constraints, regardless of whatever state happens to hold at some point in time given the rest of the code (and live pre-existing state). And I think it does so with these three constraints?
/**
 * Returns whether allocating the given candidate node would violate exclusivity constraints.
 * Only candidates that live on a parent host can violate exclusivity; hostless candidates never do.
 *
 * Two regimes, depending on whether this cluster requires exclusive allocation
 * (as decided by {@code nodeRepository.exclusiveAllocation(cluster)}):
 * - exclusive: the parent host itself must be acceptable — its advertised resources must match the
 *   requested resources (when specified), and it must not be pinned to a different application or
 *   cluster type;
 * - non-exclusive: no already-allocated child of the same host owned by a different application may
 *   claim (or be granted) exclusivity.
 *
 * @param candidate the node being offered; may or may not have a parent host
 * @return true if accepting this candidate would break an exclusivity constraint
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    // A candidate without a parent host cannot clash with any host-level exclusivity.
    if (candidate.parent.isEmpty()) return false;
    if (nodeRepository.exclusiveAllocation(cluster)) {
        var parent = candidate.parent.get();
        // Exclusive allocation: the host's advertised resources must be compatible with the
        // requested node resources, when those are specified.
        // NOTE(review): this appears to encode a "node fills the host completely" policy — confirm.
        if (!candidate.resources().isUnspecified() && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(parent.flavor()).compatibleWith(candidate.resources())) return true;
        // Host is reserved for a different application.
        if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
        // Host is reserved for a different cluster type.
        if (parent.exclusiveToClusterType().isPresent() && !parent.exclusiveToClusterType().get().equals(cluster.type())) return true;
        return false;
    } else {
        // Non-exclusive allocation: walk the other children of the same host. If this request is
        // exclusive, or any sibling's cluster is exclusive, then a sibling owned by another
        // application makes the host off-limits.
        for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (nodeOnHost.allocation().isEmpty()) continue;
            if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
                if (!nodeOnHost.allocation().get().owner().equals(application)) return true;
            }
        }
        return false;
    }
}
if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
/**
 * Returns whether allocating the given candidate node would violate exclusivity constraints.
 * Only candidates that live on a parent host can violate exclusivity; hostless candidates never do.
 *
 * Two regimes, depending on whether this cluster requires exclusive allocation
 * (as decided by {@code nodeRepository.exclusiveAllocation(cluster)}):
 * - exclusive: the parent host itself must be acceptable — its advertised resources must match the
 *   requested resources (when specified), and it must not be pinned to a different application or
 *   cluster type;
 * - non-exclusive: no already-allocated child of the same host belonging to an exclusive cluster of
 *   a different application may be present.
 *
 * @param candidate the node being offered; may or may not have a parent host
 * @return true if accepting this candidate would break an exclusivity constraint
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    // A candidate without a parent host cannot clash with any host-level exclusivity.
    if (candidate.parent.isEmpty()) return false;
    if (nodeRepository.exclusiveAllocation(cluster)) {
        var parent = candidate.parent.get();
        // Exclusive allocation: the host's advertised resources must be compatible with the
        // requested node resources, when those are specified.
        // NOTE(review): this appears to encode a "node fills the host completely" policy — confirm.
        if (!candidate.resources().isUnspecified() && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(parent.flavor()).compatibleWith(candidate.resources())) return true;
        // Host is reserved for a different application.
        if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
        // Host is reserved for a different cluster type.
        if (parent.exclusiveToClusterType().isPresent() && !parent.exclusiveToClusterType().get().equals(cluster.type())) return true;
        return false;
    } else {
        // Non-exclusive allocation: walk the other children of the same host. A sibling whose
        // cluster claims exclusivity and is owned by another application makes the host off-limits.
        // (Unlike the pre-change variant, this request's own isExclusive() is not consulted here —
        // that case is covered by the exclusiveAllocation branch above.)
        for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (nodeOnHost.allocation().isEmpty()) continue;
            if (nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
                if (!nodeOnHost.allocation().get().owner().equals(application)) return true;
            }
        }
        return false;
    }
}
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
If the tenant wants to access AWS secret store, they must be allocated on a host that has `exclusiveToApplicationId` set so the host gets the right IAM role & policies
/**
 * Returns whether allocating the candidate on its current host would violate exclusivity constraints.
 * Note that while we currently require that exclusive allocations use the entire host,
 * this method also handles the case where smaller exclusive nodes are allocated on it.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false; // no host involved, nothing to violate

    if ( ! nodeRepository.exclusiveAllocation(cluster)) {
        // Non-exclusive allocation: this node may not share a host with another application's nodes
        // when either side demands exclusivity (the request itself, or any sibling's cluster).
        for (Node sibling : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (sibling.allocation().isEmpty()) continue;
            Allocation siblingAllocation = sibling.allocation().get();
            boolean exclusivityApplies = requestedNodes.isExclusive()
                                         || siblingAllocation.membership().cluster().isExclusive();
            if (exclusivityApplies && ! siblingAllocation.owner().equals(application)) return true;
        }
        return false;
    }

    // Exclusive allocation: unless resources are unspecified, the candidate must match the host's
    // advertised resources, and the host must not be pinned to another application or cluster type.
    var host = candidate.parent.get();
    boolean matchesWholeHost = candidate.resources().isUnspecified()
                               || nodeRepository.resourcesCalculator()
                                                .advertisedResourcesOf(host.flavor())
                                                .compatibleWith(candidate.resources());
    if ( ! matchesWholeHost) return true;
    if (host.exclusiveToApplicationId().map(owner -> ! owner.equals(application)).orElse(false)) return true;
    return host.exclusiveToClusterType().map(type -> ! type.equals(cluster.type())).orElse(false);
}
if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
/**
 * Returns whether allocating the candidate on its current host would violate exclusivity constraints.
 *
 * NOTE(review): in the non-exclusive branch this variant only honors the sibling node's own
 * cluster exclusivity flag, not {@code requestedNodes.isExclusive()} — a sibling variant of this
 * method also checks the request; confirm which behavior is intended.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false; // no host involved, nothing to violate

    if ( ! nodeRepository.exclusiveAllocation(cluster)) {
        // Non-exclusive allocation: reject if the host already carries another application's
        // node whose cluster demands exclusivity.
        for (Node sibling : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (sibling.allocation().isEmpty()) continue;
            Allocation siblingAllocation = sibling.allocation().get();
            if (siblingAllocation.membership().cluster().isExclusive()
                && ! siblingAllocation.owner().equals(application)) return true;
        }
        return false;
    }

    // Exclusive allocation: unless resources are unspecified, the candidate must match the host's
    // advertised resources, and the host must not be pinned to another application or cluster type.
    var host = candidate.parent.get();
    boolean matchesWholeHost = candidate.resources().isUnspecified()
                               || nodeRepository.resourcesCalculator()
                                                .advertisedResourcesOf(host.flavor())
                                                .compatibleWith(candidate.resources());
    if ( ! matchesWholeHost) return true;
    if (host.exclusiveToApplicationId().map(owner -> ! owner.equals(application)).orElse(false)) return true;
    return host.exclusiveToClusterType().map(type -> ! type.equals(cluster.type())).orElse(false);
}
/**
 * Builds up the set of nodes accepted for one allocation request (a {@link NodeSpec}) for a single
 * application cluster. Candidates are fed in through {@link #offer}, which accepts, rejects or
 * schedules them for retirement while tracking rejection reasons; {@link #finalNodes} then balances
 * the retired set and returns the final node list.
 *
 * Not thread-safe: mutable accumulators are updated without synchronization — presumably used by a
 * single thread per allocation (TODO confirm from callers).
 */
class NodeAllocation {

    private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName());

    /** List of all nodes in node-repository */
    private final NodeList allNodes;

    /** The application this list is for */
    private final ApplicationId application;

    /** The cluster this list is for */
    private final ClusterSpec cluster;

    /** The requested nodes of this list */
    private final NodeSpec requestedNodes;

    /** The node candidates this has accepted so far, keyed on hostname */
    private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();

    /** The number of already allocated nodes accepted and not retired */
    private int accepted = 0;

    /** The number of already allocated nodes accepted and not retired and not needing resize */
    private int acceptedWithoutResizingRetired = 0;

    /** The number of nodes rejected because of clashing parentHostname */
    private int rejectedDueToClashingParentHost = 0;

    /** The number of nodes rejected due to exclusivity constraints */
    private int rejectedDueToExclusivity = 0;

    /** The number of nodes rejected because real resources on the host were insufficient */
    private int rejectedDueToInsufficientRealResources = 0;

    /** The number of nodes that just now was changed to retired */
    private int wasRetiredJustNow = 0;

    /** The node indexes to verify uniqueness of each members index */
    private final Set<Integer> indexes = new HashSet<>();

    /** The next membership index to assign to a new node */
    private final Supplier<Integer> nextIndex;

    private final NodeRepository nodeRepository;
    private final NodeResourceLimits nodeResourceLimits;

    /** Host flavor required via feature flag, if any (empty flag value means no requirement) */
    private final Optional<String> requiredHostFlavor;

    NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
                   Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
        this.allNodes = allNodes;
        this.application = application;
        this.cluster = cluster;
        this.requestedNodes = requestedNodes;
        this.nextIndex = nextIndex;
        this.nodeRepository = nodeRepository;
        this.nodeResourceLimits = new NodeResourceLimits(nodeRepository);
        // Resolve the HOST_FLAVOR flag for this application/cluster; blank means "no requirement"
        this.requiredHostFlavor = Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource())
                                                                        .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                                                                        .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
                                                                        .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
                                                                        .value())
                                          .filter(s -> !s.isBlank());
    }

    /**
     * Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
     * an allocation to this cluster, or no current allocation (in which case one is assigned).
     *
     * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
     * reject allocated nodes due to index duplicates.
     *
     * @param candidates the nodes which are potentially on offer. These may belong to a different application etc.
     */
    void offer(List<NodeCandidate> candidates) {
        for (NodeCandidate candidate : candidates) {
            if (candidate.allocation().isPresent()) {
                // Already allocated: only reusable if it belongs to this application/cluster and is usable
                Allocation allocation = candidate.allocation().get();
                ClusterMembership membership = allocation.membership();
                if ( ! allocation.owner().equals(application)) continue;
                if ( ! membership.cluster().satisfies(cluster)) continue;
                if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue;
                if ( candidate.state() == Node.State.active && allocation.removable()) continue;
                if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue;
                if ( indexes.contains(membership.index())) continue; // index must be unique within the cluster
                if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue;
                boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
                boolean acceptToRetire = acceptToRetire(candidate);
                if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
                    candidate = candidate.withNode();
                    if (candidate.isValid())
                        acceptNode(candidate, shouldRetire(candidate, candidates), resizeable);
                }
            }
            else if (! saturated() && hasCompatibleResources(candidate)) {
                // Unallocated: reject with a counted reason, or allocate it to this cluster
                if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) {
                    ++rejectedDueToInsufficientRealResources;
                    continue;
                }
                if ( violatesParentHostPolicy(candidate)) {
                    ++rejectedDueToClashingParentHost;
                    continue;
                }
                if ( violatesExclusivity(candidate)) {
                    ++rejectedDueToExclusivity;
                    continue;
                }
                if (candidate.wantToRetire()) {
                    continue;
                }
                candidate = candidate.allocate(application,
                                               ClusterMembership.from(cluster, nextIndex.get()),
                                               requestedNodes.resources().orElse(candidate.resources()),
                                               nodeRepository.clock().instant());
                if (candidate.isValid()) {
                    acceptNode(candidate, Retirement.none, false);
                }
            }
        }
    }

    /** Returns the cause of retirement for given candidate */
    private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
        if ( ! requestedNodes.considerRetiring()) {
            boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
            return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
        }
        if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
        if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
        if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
        if (candidate.wantToRetire()) return Retirement.hardRequest;
        if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
        if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity;
        if (requiredHostFlavor.isPresent() && ! candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor;
        return Retirement.none;
    }

    /** Returns whether accepting this candidate would clash with an already accepted node on the same host */
    private boolean violatesParentHostPolicy(NodeCandidate candidate) {
        return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
    }

    /** The parent host policy only applies to non-tester applications in main/production */
    private boolean checkForClashingParentHost() {
        return nodeRepository.zone().system() == SystemName.main &&
               nodeRepository.zone().environment().isProduction() &&
               ! application.instance().isTester();
    }

    private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
        for (NodeCandidate acceptedNode : nodes.values()) {
            if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
                acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns whether allocating the candidate on this host would violate exclusivity constraints.
     * Note that while we currently require that exclusive allocations uses the entire host,
     * this method also handles the case where smaller exclusive nodes are allocated on it.
     */

    /**
     * Returns whether this node should be accepted into the cluster even if it is not currently desired
     * (already enough nodes, or wrong resources, etc.).
     * Such nodes will be marked retired during finalization of the list of accepted nodes.
     * The conditions for this are:
     *
     * This is a stateful node. These must always be retired before being removed to allow the cluster to
     * migrate away data.
     *
     * This is a container node and it is not desired due to having the wrong flavor. In this case this
     * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
     * be used to avoid removing all the current nodes at once, before the newly allocated replacements are
     * initialized. (In the other case, where a container node is not desired because we have enough nodes we
     * do want to remove it immediately to get immediate feedback on how the size reduction works out.)
     */
    private boolean acceptToRetire(NodeCandidate candidate) {
        if (candidate.state() != Node.State.active) return false;
        if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
        if (candidate.allocation().get().membership().retired()) return true; // don't second-guess if already retired
        if (! requestedNodes.considerRetiring()) return false;
        return cluster.isStateful() ||
               (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
    }

    private boolean hasCompatibleResources(NodeCandidate candidate) {
        return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable;
    }

    /**
     * Accepts the candidate into {@link #nodes}, retiring it if a retirement cause was given,
     * and updates the acceptance counters and index bookkeeping.
     *
     * @return the (possibly updated) node that was accepted
     */
    private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
        Node node = candidate.toNode();
        if (node.allocation().isPresent())
            node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
        if (retirement == Retirement.none) {
            accepted++;
            if (node.allocation().isEmpty()
                || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
                acceptedWithoutResizingRetired++;
            if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) {
                node = resize(node);
            }
            if (node.state() != Node.State.active) // reactivating a retired or inactive node
                node = node.unretire().removable(false);
        } else {
            LOG.info("Retiring " + node + " because " + retirement.description());
            ++wasRetiredJustNow;
            node = node.retire(nodeRepository.clock().instant());
        }
        if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
            // group may be different
            node = setCluster(cluster, node);
        }
        candidate = candidate.withNode(node);
        indexes.add(node.allocation().get().membership().index());
        nodes.put(node.hostname(), candidate);
        return node;
    }

    /** Returns the node with the requested resources, keeping host-determined disk/storage/architecture */
    private Node resize(Node node) {
        NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
        return node.with(new Flavor(requestedNodes.resources().get()
                                                  .with(hostResources.diskSpeed())
                                                  .with(hostResources.storageType())
                                                  .with(hostResources.architecture())),
                         Agent.application, nodeRepository.clock().instant());
    }

    private Node setCluster(ClusterSpec cluster, Node node) {
        ClusterMembership membership = node.allocation().get().membership().with(cluster);
        return node.with(node.allocation().get().with(membership));
    }

    /** Returns true if no more nodes are needed in this list */
    private boolean saturated() {
        return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
    }

    /** Returns true if the content of this list is sufficient to meet the request */
    boolean fulfilled() {
        return requestedNodes.fulfilledBy(accepted());
    }

    /** Returns true if this allocation was already fulfilled and resulted in no new changes */
    public boolean fulfilledAndNoChanges() {
        return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
    }

    /**
     * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
     *
     * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the
     *         flavor and host count required to cover the deficit.
     */
    Optional<HostDeficit> hostDeficit() {
        if (nodeType().isHost()) {
            return Optional.empty(); // hosts are provisioned as required by the child application
        }
        return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified),
                                           requestedNodes.fulfilledDeficitCount(accepted())))
                       .filter(hostDeficit -> hostDeficit.count() > 0);
    }

    /** Returns the indices to use when provisioning hosts for this */
    List<Integer> provisionIndices(int count) {
        if (count < 1) throw new IllegalArgumentException("Count must be positive");
        NodeType hostType = requestedNodes.type().hostType();
        if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);

        // Infrastructure hosts have fixed indices parsed from the hostname: pick the lowest free ones
        Set<Integer> currentIndices = allNodes.nodeType(hostType)
                                              .hostnames()
                                              .stream()
                                              .map(NodeAllocation::parseIndex)
                                              .collect(Collectors.toSet());
        List<Integer> indices = new ArrayList<>(count);
        for (int i = 1; indices.size() < count; i++) {
            if (!currentIndices.contains(i)) {
                indices.add(i);
            }
        }
        // Never provision our own index — presumably this runs on one of these hosts (TODO confirm)
        Integer myIndex = parseIndex(HostName.getLocalhost());
        indices.remove(myIndex);
        return indices;
    }

    /** The node type this is allocating */
    NodeType nodeType() {
        return requestedNodes.type();
    }

    /**
     * Make the number of <i>non-retired</i> nodes in the list equal to the requested number
     * of nodes, and retire the rest of the list. Only retire currently active nodes.
     * Prefer to retire nodes of the wrong flavor.
     * Make as few changes to the retired set as possible.
     *
     * @return the final list of nodes
     */
    List<Node> finalNodes() {
        int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count();
        int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
        int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);

        if (deltaRetiredCount > 0) { // retire until deltaRetiredCount is 0
            for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
                if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
                    candidate = candidate.withNode();
                    candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (--deltaRetiredCount == 0) break;
                }
            }
        }
        else if (deltaRetiredCount < 0) { // unretire until deltaRetiredCount is 0
            for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
                if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) {
                    candidate = candidate.withNode();
                    if (candidate.isResizable)
                        candidate = candidate.withNode(resize(candidate.toNode()));
                    candidate = candidate.withNode(candidate.toNode().unretire());
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (++deltaRetiredCount == 0) break;
                }
            }
        }

        // Set the exclusivity of the cluster membership on every node to match the request
        for (NodeCandidate candidate : nodes.values()) {
            candidate = candidate.withNode();
            Allocation allocation = candidate.allocation().get();
            candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
                                .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
            nodes.put(candidate.toNode().hostname(), candidate);
        }

        return nodes.values().stream().map(n -> n.toNode()).toList();
    }

    List<Node> reservableNodes() {
        // Include already reserved nodes to extend reservation period and to potentially update their cluster spec.
        EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
        return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
    }

    List<Node> newNodes() {
        return nodesFilter(n -> n.isNew);
    }

    private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
        return nodes.values().stream()
                    .filter(predicate)
                    .map(n -> n.toNode())
                    .toList();
    }

    /** Returns the number of nodes accepted this far */
    private int accepted() {
        if (nodeType() == NodeType.tenant) return accepted;
        // For host types, count all existing nodes of that type in the repository
        return allNodes.nodeType(nodeType()).size();
    }

    /** Prefer to retire nodes we want the least */
    private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream().sorted(Comparator.reverseOrder()).toList();
    }

    /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
    private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream()
                         .sorted(Comparator.comparing(NodeCandidate::wantToRetire)
                                           .thenComparing(n -> n.allocation().get().membership().index()))
                         .toList();
    }

    /** Returns a human-readable summary of rejection/retirement reasons, or "" if there were none */
    public String allocationFailureDetails() {
        List<String> reasons = new ArrayList<>();
        if (rejectedDueToExclusivity > 0)
            reasons.add("host exclusivity constraints");
        if (rejectedDueToClashingParentHost > 0)
            reasons.add("insufficient nodes available on separate physical hosts");
        if (wasRetiredJustNow > 0)
            reasons.add("retirement of allocated nodes");
        if (rejectedDueToInsufficientRealResources > 0)
            reasons.add("insufficient real resources on hosts");
        if (reasons.isEmpty()) return "";
        return ": Not enough suitable nodes available due to " + String.join(", ", reasons);
    }

    /** Extracts the index from a hostname shaped like [non-digits][index].[suffix] */
    private static Integer parseIndex(String hostname) {
        try {
            return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
        }
    }

    /** Possible causes of node retirement */
    private enum Retirement {

        alreadyRetired("node is already retired"),
        outsideRealLimits("node real resources is outside limits"),
        violatesParentHostPolicy("node violates parent host policy"),
        incompatibleResources("node resources are incompatible"),
        hardRequest("node is requested to retire"),
        softRequest("node is requested to retire (soft)"),
        violatesExclusivity("node violates host exclusivity"),
        violatesHostFlavor("node violates host flavor"),
        none("");

        private final String description;

        Retirement(String description) {
            this.description = description;
        }

        /** Human readable description of this cause */
        public String description() { return description; }

    }

    /** A host deficit, the number of missing hosts, for a deployment */
    static class HostDeficit {

        private final NodeResources resources;
        private final int count;

        private HostDeficit(NodeResources resources, int count) {
            this.resources = resources;
            this.count = count;
        }

        NodeResources resources() { return resources; }

        int count() { return count; }

        @Override
        public String toString() {
            return "deficit of " + count + " nodes with " + resources;
        }

    }

}
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
OK — in that case, should the check on line 218 also use `NodeRepository::exclusiveAllocation` to determine exclusivity?
/**
 * Returns whether allocating the given candidate on its parent host would violate exclusivity constraints.
 *
 * <p>A candidate with no parent host cannot violate host exclusivity. Otherwise there are two regimes:
 * when {@code nodeRepository.exclusiveAllocation(cluster)} holds, the parent host itself must be usable
 * exclusively by this deployment (its advertised flavor resources must fit the candidate's requested
 * resources, and any application/cluster-type pinning on the host must match); when it does not hold,
 * the candidate must not share the host with nodes of other applications if either this request is
 * exclusive or any co-located node belongs to an exclusive cluster.
 *
 * @param candidate the node candidate being considered for allocation
 * @return true if accepting the candidate would violate exclusivity, false otherwise
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    // Nodes without a parent (e.g. the hosts themselves) cannot violate host exclusivity.
    if (candidate.parent.isEmpty()) return false;
    if (nodeRepository.exclusiveAllocation(cluster)) {
        var parent = candidate.parent.get();
        // Exclusive allocations are expected to use the whole host: the host's advertised resources
        // must be compatible with the candidate's (specified) resources.
        if (!candidate.resources().isUnspecified() && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(parent.flavor()).compatibleWith(candidate.resources())) return true;
        // A host pinned to a specific application may only take nodes of that application.
        if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
        // A host pinned to a specific cluster type may only take nodes of that cluster type.
        if (parent.exclusiveToClusterType().isPresent() && !parent.exclusiveToClusterType().get().equals(cluster.type())) return true;
        return false;
    } else {
        // Non-exclusive allocation: check the other children already on this host. If this request is
        // exclusive, or a co-located node's cluster is exclusive, no node of another application may share it.
        for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (nodeOnHost.allocation().isEmpty()) continue;
            if (requestedNodes.isExclusive() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
                if (!nodeOnHost.allocation().get().owner().equals(application)) return true;
            }
        }
        return false;
    }
}
else {
/**
 * Returns whether allocating the given candidate on its parent host would violate exclusivity constraints.
 *
 * <p>A candidate with no parent host cannot violate host exclusivity. Otherwise there are two regimes:
 * when {@code nodeRepository.exclusiveAllocation(cluster)} holds, the parent host itself must be usable
 * exclusively by this deployment (its advertised flavor resources must fit the candidate's requested
 * resources, and any application/cluster-type pinning on the host must match); when it does not hold,
 * the candidate must not share the host with other applications' nodes that belong to exclusive clusters.
 *
 * <p>NOTE(review): unlike the sibling version, this variant does not consult
 * {@code requestedNodes.isExclusive()} in the sharing check — exclusivity of this request is presumably
 * already captured by {@code nodeRepository.exclusiveAllocation(cluster)} above; confirm.
 *
 * @param candidate the node candidate being considered for allocation
 * @return true if accepting the candidate would violate exclusivity, false otherwise
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    // Nodes without a parent (e.g. the hosts themselves) cannot violate host exclusivity.
    if (candidate.parent.isEmpty()) return false;
    if (nodeRepository.exclusiveAllocation(cluster)) {
        var parent = candidate.parent.get();
        // Exclusive allocations are expected to use the whole host: the host's advertised resources
        // must be compatible with the candidate's (specified) resources.
        if (!candidate.resources().isUnspecified() && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(parent.flavor()).compatibleWith(candidate.resources())) return true;
        // A host pinned to a specific application may only take nodes of that application.
        if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
        // A host pinned to a specific cluster type may only take nodes of that cluster type.
        if (parent.exclusiveToClusterType().isPresent() && !parent.exclusiveToClusterType().get().equals(cluster.type())) return true;
        return false;
    } else {
        // Non-exclusive allocation: if any co-located node belongs to an exclusive cluster,
        // no node of another application may share this host.
        for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (nodeOnHost.allocation().isEmpty()) continue;
            if (nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
                if (!nodeOnHost.allocation().get().owner().equals(application)) return true;
            }
        }
        return false;
    }
}
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we
 * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */
private boolean acceptToRetire(NodeCandidate candidate) {
    if (candidate.state() != Node.State.active) return false;
    if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
    if (candidate.allocation().get().membership().retired()) return true; // already retired: keep accepting it as retired
    if (! requestedNodes.considerRetiring()) return false;

    return cluster.isStateful() ||
           (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
}

/** Returns whether the candidate's resources satisfy the request, either as-is or by an in-place resize. */
private boolean hasCompatibleResources(NodeCandidate candidate) {
    return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable;
}

/**
 * Accepts the given candidate into this allocation: updates acceptance counters, applies
 * resize/retire/unretire as dictated by the arguments, rewrites the cluster membership if it
 * differs from the requested cluster, and records the node and its membership index.
 *
 * @param candidate  the candidate to accept
 * @param retirement the cause of retirement, or Retirement.none to accept the node as active
 * @param resizeable whether the node may be resized in place to the requested resources
 * @return the (possibly modified) node that was accepted
 */
private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
    Node node = candidate.toNode();

    if (node.allocation().isPresent()) // record the resources this request asked for on the allocation
        node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));

    if (retirement == Retirement.none) {
        accepted++;

        // Retired nodes that still need a resize are excluded from the count that drives
        // saturated(), so acceptance continues until enough other nodes are in
        if (node.allocation().isEmpty()
            || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
            acceptedWithoutResizingRetired++;

        if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) {
            node = resize(node);
        }

        if (node.state() != Node.State.active) // reactivating: clear flags that would deactivate it again
            node = node.unretire().removable(false);
    }
    else {
        LOG.info("Retiring " + node + " because " + retirement.description());
        ++wasRetiredJustNow;
        node = node.retire(nodeRepository.clock().instant());
    }
    if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
        node = setCluster(cluster, node); // e.g. the group of the existing membership may differ
    }
    candidate = candidate.withNode(node);
    indexes.add(node.allocation().get().membership().index());
    nodes.put(node.hostname(), candidate);
    return node;
}

/** Returns the node with its flavor set to the requested resources, keeping the host's disk speed, storage type and architecture. */
private Node resize(Node node) {
    NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
    return node.with(new Flavor(requestedNodes.resources().get()
                                              .with(hostResources.diskSpeed())
                                              .with(hostResources.storageType())
                                              .with(hostResources.architecture())),
                     Agent.application, nodeRepository.clock().instant());
}

/** Returns the node with its cluster membership replaced by one for the given cluster spec. */
private Node setCluster(ClusterSpec cluster, Node node) {
    ClusterMembership membership = node.allocation().get().membership().with(cluster);
    return node.with(node.allocation().get().with(membership));
}

/** Returns true if no more nodes are needed in this list */
private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); }

/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); }

/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); }

/**
 * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
 *
 * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the
 *         flavor and host count required to cover the deficit.
 */
Optional<HostDeficit> hostDeficit() {
    if (nodeType().isHost()) {
        return Optional.empty(); // requests for host types never report a host deficit
    }
    return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified),
                                       requestedNodes.fulfilledDeficitCount(accepted())))
                   .filter(hostDeficit -> hostDeficit.count() > 0);
}

/** Returns the indices to use when provisioning hosts for this */
List<Integer> provisionIndices(int count) {
    if (count < 1) throw new IllegalArgumentException("Count must be positive");

    NodeType hostType = requestedNodes.type().hostType();
    if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);

    // Other host types: derive free indices from the hostnames of the existing hosts of that type
    Set<Integer> currentIndices = allNodes.nodeType(hostType)
                                          .hostnames()
                                          .stream()
                                          .map(NodeAllocation::parseIndex)
                                          .collect(Collectors.toSet());
    List<Integer> indices = new ArrayList<>(count);
    for (int i = 1; indices.size() < count; i++) {
        if (!currentIndices.contains(i)) {
            indices.add(i);
        }
    }
    // NOTE(review): the index parsed from this machine's own hostname is excluded from the result —
    // presumably to avoid handing it out again; confirm against callers
    Integer myIndex = parseIndex(HostName.getLocalhost());
    indices.remove(myIndex);
    return indices;
}

/** The node type this is allocating */
NodeType nodeType() { return requestedNodes.type(); }

/**
 * Make the number of <i>non-retired</i> nodes in the list equal to the requested number
 * of nodes, and retire the rest of the list. Only retire currently active nodes.
 * Prefer to retire nodes of the wrong flavor.
 * Make as few changes to the retired set as possible.
 *
 * @return the final list of nodes
 */
List<Node> finalNodes() {
    int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count();
    int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
    int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);

    if (deltaRetiredCount > 0) { // retire active, non-retired nodes until the delta is covered
        for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
            if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
                candidate = candidate.withNode();
                candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
                nodes.put(candidate.toNode().hostname(), candidate);
                if (--deltaRetiredCount == 0) break;
            }
        }
    }
    else if (deltaRetiredCount < 0) { // unretire compatible retired nodes (resizing first if possible)
        for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
            if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) {
                candidate = candidate.withNode();
                if (candidate.isResizable)
                    candidate = candidate.withNode(resize(candidate.toNode()));
                candidate = candidate.withNode(candidate.toNode().unretire());
                nodes.put(candidate.toNode().hostname(), candidate);
                if (++deltaRetiredCount == 0) break;
            }
        }
    }

    // Stamp this request's exclusivity onto every accepted node's cluster membership
    for (NodeCandidate candidate : nodes.values()) {
        candidate = candidate.withNode();
        Allocation allocation = candidate.allocation().get();
        candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
                             .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
        nodes.put(candidate.toNode().hostname(), candidate);
    }

    return nodes.values().stream().map(n -> n.toNode()).toList();
}

/** Returns the accepted, pre-existing nodes that are in a state from which they can be reserved */
List<Node> reservableNodes() {
    EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
    return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}

/** Returns the accepted nodes that were newly created by this allocation */
List<Node> newNodes() {
    return nodesFilter(n -> n.isNew);
}

/** Returns the accepted candidates matching the given predicate, as nodes */
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
    return nodes.values().stream()
                .filter(predicate)
                .map(n -> n.toNode())
                .toList();
}

/** Returns the number of nodes accepted this far */
private int accepted() {
    if (nodeType() == NodeType.tenant) return accepted;
    // Non-tenant types: count every node of the type present in the repository
    return allNodes.nodeType(nodeType()).size();
}

/** Prefer to retire nodes we want the least */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
    return candidates.stream().sorted(Comparator.reverseOrder()).toList();
}

/** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
    return candidates.stream()
                     .sorted(Comparator.comparing(NodeCandidate::wantToRetire)
                                       .thenComparing(n -> n.allocation().get().membership().index()))
                     .toList();
}

/** Returns a human-readable summary of why this allocation fell short, or "" when no tracked reason applies */
public String allocationFailureDetails() {
    List<String> reasons = new ArrayList<>();
    if (rejectedDueToExclusivity > 0)
        reasons.add("host exclusivity constraints");
    if (rejectedDueToClashingParentHost > 0)
        reasons.add("insufficient nodes available on separate physical hosts");
    if (wasRetiredJustNow > 0)
        reasons.add("retirement of allocated nodes");
    if (rejectedDueToInsufficientRealResources > 0)
        reasons.add("insufficient real resources on hosts");
    if (reasons.isEmpty()) return "";

    return ": Not enough suitable nodes available due to " + String.join(", ", reasons);
}

/** Parses the numeric index out of a hostname of the form prefixNNN.suffix; throws IllegalArgumentException if absent. */
private static Integer parseIndex(String hostname) {
    try {
        return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
    }
}

/** Possible causes of node retirement */
private enum Retirement {

    alreadyRetired("node is already retired"),
    outsideRealLimits("node real resources is outside limits"),
    violatesParentHostPolicy("node violates parent host policy"),
    incompatibleResources("node resources are incompatible"),
    hardRequest("node is requested to retire"),
    softRequest("node is requested to retire (soft)"),
    violatesExclusivity("node violates host exclusivity"),
    violatesHostFlavor("node violates host flavor"),
    none("");

    private final String description;

    Retirement(String description) {
        this.description = description;
    }

    /** Human readable description of this cause */
    public String description() { return description; }

}

/** A host deficit, the number of missing hosts, for a deployment */
static class HostDeficit {

    // Resources each missing host must provide
    private final NodeResources resources;

    // Number of hosts missing
    private final int count;

    private HostDeficit(NodeResources resources, int count) {
        this.resources = resources;
        this.count = count;
    }

    NodeResources resources() { return resources; }

    int count() { return count; }

    @Override
    public String toString() { return "deficit of " + count + " nodes with " + resources; }

}

}
Ah yes, good point.
/**
 * Returns whether allocating the given candidate on its parent host would violate exclusivity
 * constraints, under either the exclusive-allocation regime or the shared-host regime.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false;

    if ( ! nodeRepository.exclusiveAllocation(cluster)) {
        // Shared regime: violated when exclusivity is claimed (by this request or by a sibling's
        // cluster) and the claiming sibling belongs to a different application
        for (Node sibling : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (sibling.allocation().isEmpty()) continue;
            boolean exclusivityClaimed = requestedNodes.isExclusive()
                                         || sibling.allocation().get().membership().cluster().isExclusive();
            if (exclusivityClaimed && ! sibling.allocation().get().owner().equals(application))
                return true;
        }
        return false;
    }

    // Exclusive regime: violated when the candidate's resources don't match the host's advertised
    // resources, or the host is dedicated to another application or cluster type
    var host = candidate.parent.get();
    if ( ! candidate.resources().isUnspecified()
         && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(host.flavor()).compatibleWith(candidate.resources()))
        return true;
    if (host.exclusiveToApplicationId().isPresent() && ! host.exclusiveToApplicationId().get().equals(application))
        return true;
    return host.exclusiveToClusterType().isPresent() && ! host.exclusiveToClusterType().get().equals(cluster.type());
}
if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
/**
 * Returns whether allocating the given candidate on its parent host would violate exclusivity
 * constraints, under either the exclusive-allocation regime or the shared-host regime.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false;

    if ( ! nodeRepository.exclusiveAllocation(cluster)) {
        // Shared regime: violated when a sibling node's cluster claims exclusivity
        // and that sibling belongs to a different application
        for (Node sibling : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (sibling.allocation().isEmpty()) continue;
            if (sibling.allocation().get().membership().cluster().isExclusive()
                && ! sibling.allocation().get().owner().equals(application))
                return true;
        }
        return false;
    }

    // Exclusive regime: violated when the candidate's resources don't match the host's advertised
    // resources, or the host is dedicated to another application or cluster type
    var host = candidate.parent.get();
    if ( ! candidate.resources().isUnspecified()
         && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(host.flavor()).compatibleWith(candidate.resources()))
        return true;
    if (host.exclusiveToApplicationId().isPresent() && ! host.exclusiveToApplicationId().get().equals(application))
        return true;
    return host.exclusiveToClusterType().isPresent() && ! host.exclusiveToClusterType().get().equals(cluster.type());
}
class NodeAllocation {

    private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName());

    /** List of all nodes in node-repository */
    private final NodeList allNodes;

    /** The application this list is for */
    private final ApplicationId application;

    /** The cluster this list is for */
    private final ClusterSpec cluster;

    /** The requested nodes of this list */
    private final NodeSpec requestedNodes;

    /** The node candidates this has accepted so far, keyed on hostname */
    private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();

    /** The number of already allocated nodes accepted and not retired */
    private int accepted = 0;

    /** The number of already allocated nodes accepted and not retired and not needing resize */
    private int acceptedWithoutResizingRetired = 0;

    /** The number of nodes rejected because of clashing parentHostname */
    private int rejectedDueToClashingParentHost = 0;

    /** The number of nodes rejected due to exclusivity constraints */
    private int rejectedDueToExclusivity = 0;

    /** The number of nodes rejected because their host lacks sufficient real resources */
    private int rejectedDueToInsufficientRealResources = 0;

    /** The number of nodes that just now was changed to retired */
    private int wasRetiredJustNow = 0;

    /** The node indexes to verify uniqueness of each members index */
    private final Set<Integer> indexes = new HashSet<>();

    /** The next membership index to assign to a new node */
    private final Supplier<Integer> nextIndex;

    private final NodeRepository nodeRepository;
    private final NodeResourceLimits nodeResourceLimits;

    /** Host flavor required by feature flag for this application/cluster, empty when the flag is blank */
    private final Optional<String> requiredHostFlavor;

    NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
                   Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
        this.allNodes = allNodes;
        this.application = application;
        this.cluster = cluster;
        this.requestedNodes = requestedNodes;
        this.nextIndex = nextIndex;
        this.nodeRepository = nodeRepository;
        this.nodeResourceLimits = new NodeResourceLimits(nodeRepository);
        // Resolve the HOST_FLAVOR flag for this application/cluster; a blank value means "no requirement"
        this.requiredHostFlavor = Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource())
                                                                        .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                                                                        .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
                                                                        .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
                                                                        .value())
                                          .filter(s -> !s.isBlank());
    }

    /**
     * Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
     * an allocation to this cluster, or no current allocation (in which case one is assigned).
     *
     * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
     * reject allocated nodes due to index duplicates.
     *
     * @param candidates the nodes which are potentially on offer. These may belong to a different application etc.
     */
    void offer(List<NodeCandidate> candidates) {
        for (NodeCandidate candidate : candidates) {
            if (candidate.allocation().isPresent()) {
                // Candidate already carries an allocation: accept only if it belongs to this
                // application and cluster and doesn't clash on index, account or removal state
                Allocation allocation = candidate.allocation().get();
                ClusterMembership membership = allocation.membership();
                if ( ! allocation.owner().equals(application)) continue; // wrong application
                if ( ! membership.cluster().satisfies(cluster)) continue; // wrong cluster id/type
                if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; // wrong group, and we can't or have no reason to change it
                if ( candidate.state() == Node.State.active && allocation.removable()) continue; // don't accept; causes removal
                if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; // don't accept; causes failing
                if ( indexes.contains(membership.index())) continue; // duplicate index
                if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; // wrong cloud account

                boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
                boolean acceptToRetire = acceptToRetire(candidate);

                if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
                    candidate = candidate.withNode();
                    if (candidate.isValid())
                        acceptNode(candidate, shouldRetire(candidate, candidates), resizeable);
                }
            }
            else if (! saturated() && hasCompatibleResources(candidate)) {
                // Unallocated candidate: run policy checks, then allocate it to this cluster
                if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) {
                    ++rejectedDueToInsufficientRealResources;
                    continue;
                }
                if ( violatesParentHostPolicy(candidate)) {
                    ++rejectedDueToClashingParentHost;
                    continue;
                }
                if ( violatesExclusivity(candidate)) {
                    ++rejectedDueToExclusivity;
                    continue;
                }
                if (candidate.wantToRetire()) {
                    continue;
                }
                candidate = candidate.allocate(application,
                                               ClusterMembership.from(cluster, nextIndex.get()),
                                               requestedNodes.resources().orElse(candidate.resources()),
                                               nodeRepository.clock().instant());
                if (candidate.isValid()) {
                    acceptNode(candidate, Retirement.none, false);
                }
            }
        }
    }

    /** Returns the cause of retirement for given candidate */
    private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
        if ( ! requestedNodes.considerRetiring()) {
            // Retirement is frozen for this request: only report the existing retired state
            boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
            return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
        }
        if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
        if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
        if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
        if (candidate.wantToRetire()) return Retirement.hardRequest;
        if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
        if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity;
        if (requiredHostFlavor.isPresent() && ! candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor;
        return Retirement.none;
    }

    /** Returns whether accepting the candidate would place two nodes of this cluster on the same parent host, where that is disallowed */
    private boolean violatesParentHostPolicy(NodeCandidate candidate) {
        return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
    }

    /** Returns whether the clashing-parent-host check applies: main system, production environment, non-tester instance */
    private boolean checkForClashingParentHost() {
        return nodeRepository.zone().system() == SystemName.main &&
               nodeRepository.zone().environment().isProduction() &&
               ! application.instance().isTester();
    }

    /** Returns whether any already accepted node shares the candidate's parent hostname */
    private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
        for (NodeCandidate acceptedNode : nodes.values()) {
            if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
                acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns whether allocating the candidate on this host would violate exclusivity constraints.
     * Note that while we currently require that exclusive allocations uses the entire host,
     * this method also handles the case where smaller exclusive nodes are allocated on it.
     */
    // NOTE(review): the javadoc above is left dangling — the violatesExclusivity implementation
    // is not present at this point in this copy of the class; confirm the intended version.
    /**
     * Returns whether this node should be accepted into the cluster even if it is not currently desired
     * (already enough nodes, or wrong resources, etc.).
     * Such nodes will be marked retired during finalization of the list of accepted nodes.
     * The conditions for this are:
     *
     * This is a stateful node. These must always be retired before being removed to allow the cluster to
     * migrate away data.
     *
     * This is a container node and it is not desired due to having the wrong flavor. In this case this
     * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
     * be used to avoid removing all the current nodes at once, before the newly allocated replacements are
(In the other case, where a container node is not desired because we have enough nodes we
 * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */
private boolean acceptToRetire(NodeCandidate candidate) {
    if (candidate.state() != Node.State.active) return false;
    if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
    if (candidate.allocation().get().membership().retired()) return true; // already retired: keep accepting it as retired
    if (! requestedNodes.considerRetiring()) return false;

    return cluster.isStateful() ||
           (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
}

/** Returns whether the candidate's resources satisfy the request, either as-is or by an in-place resize. */
private boolean hasCompatibleResources(NodeCandidate candidate) {
    return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable;
}

/**
 * Accepts the given candidate into this allocation: updates acceptance counters, applies
 * resize/retire/unretire as dictated by the arguments, rewrites the cluster membership if it
 * differs from the requested cluster, and records the node and its membership index.
 *
 * @param candidate  the candidate to accept
 * @param retirement the cause of retirement, or Retirement.none to accept the node as active
 * @param resizeable whether the node may be resized in place to the requested resources
 * @return the (possibly modified) node that was accepted
 */
private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
    Node node = candidate.toNode();

    if (node.allocation().isPresent()) // record the resources this request asked for on the allocation
        node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));

    if (retirement == Retirement.none) {
        accepted++;

        // Retired nodes that still need a resize are excluded from the count that drives
        // saturated(), so acceptance continues until enough other nodes are in
        if (node.allocation().isEmpty()
            || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
            acceptedWithoutResizingRetired++;

        if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) {
            node = resize(node);
        }

        if (node.state() != Node.State.active) // reactivating: clear flags that would deactivate it again
            node = node.unretire().removable(false);
    }
    else {
        LOG.info("Retiring " + node + " because " + retirement.description());
        ++wasRetiredJustNow;
        node = node.retire(nodeRepository.clock().instant());
    }
    if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
        node = setCluster(cluster, node); // e.g. the group of the existing membership may differ
    }
    candidate = candidate.withNode(node);
    indexes.add(node.allocation().get().membership().index());
    nodes.put(node.hostname(), candidate);
    return node;
}

/** Returns the node with its flavor set to the requested resources, keeping the host's disk speed, storage type and architecture. */
private Node resize(Node node) {
    NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
    return node.with(new Flavor(requestedNodes.resources().get()
                                              .with(hostResources.diskSpeed())
                                              .with(hostResources.storageType())
                                              .with(hostResources.architecture())),
                     Agent.application, nodeRepository.clock().instant());
}

/** Returns the node with its cluster membership replaced by one for the given cluster spec. */
private Node setCluster(ClusterSpec cluster, Node node) {
    ClusterMembership membership = node.allocation().get().membership().with(cluster);
    return node.with(node.allocation().get().with(membership));
}

/** Returns true if no more nodes are needed in this list */
private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); }

/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); }

/** Returns true if this allocation was already fulfilled and resulted in no new changes */
public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); }

/**
 * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
 *
 * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the
 *         flavor and host count required to cover the deficit.
 */
Optional<HostDeficit> hostDeficit() {
    if (nodeType().isHost()) {
        return Optional.empty(); // requests for host types never report a host deficit
    }
    return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified),
                                       requestedNodes.fulfilledDeficitCount(accepted())))
                   .filter(hostDeficit -> hostDeficit.count() > 0);
}

/** Returns the indices to use when provisioning hosts for this */
List<Integer> provisionIndices(int count) {
    if (count < 1) throw new IllegalArgumentException("Count must be positive");

    NodeType hostType = requestedNodes.type().hostType();
    if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);

    // Other host types: derive free indices from the hostnames of the existing hosts of that type
    Set<Integer> currentIndices = allNodes.nodeType(hostType)
                                          .hostnames()
                                          .stream()
                                          .map(NodeAllocation::parseIndex)
                                          .collect(Collectors.toSet());
    List<Integer> indices = new ArrayList<>(count);
    for (int i = 1; indices.size() < count; i++) {
        if (!currentIndices.contains(i)) {
            indices.add(i);
        }
    }
    // NOTE(review): the index parsed from this machine's own hostname is excluded from the result —
    // presumably to avoid handing it out again; confirm against callers
    Integer myIndex = parseIndex(HostName.getLocalhost());
    indices.remove(myIndex);
    return indices;
}

/** The node type this is allocating */
NodeType nodeType() { return requestedNodes.type(); }

/**
 * Make the number of <i>non-retired</i> nodes in the list equal to the requested number
 * of nodes, and retire the rest of the list. Only retire currently active nodes.
 * Prefer to retire nodes of the wrong flavor.
 * Make as few changes to the retired set as possible.
 *
 * @return the final list of nodes
 */
List<Node> finalNodes() {
    int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count();
    int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
    int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);

    if (deltaRetiredCount > 0) { // retire active, non-retired nodes until the delta is covered
        for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
            if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
                candidate = candidate.withNode();
                candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
                nodes.put(candidate.toNode().hostname(), candidate);
                if (--deltaRetiredCount == 0) break;
            }
        }
    }
    else if (deltaRetiredCount < 0) { // unretire compatible retired nodes (resizing first if possible)
        for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
            if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) {
                candidate = candidate.withNode();
                if (candidate.isResizable)
                    candidate = candidate.withNode(resize(candidate.toNode()));
                candidate = candidate.withNode(candidate.toNode().unretire());
                nodes.put(candidate.toNode().hostname(), candidate);
                if (++deltaRetiredCount == 0) break;
            }
        }
    }

    // Stamp this request's exclusivity onto every accepted node's cluster membership
    for (NodeCandidate candidate : nodes.values()) {
        candidate = candidate.withNode();
        Allocation allocation = candidate.allocation().get();
        candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
                             .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
        nodes.put(candidate.toNode().hostname(), candidate);
    }

    return nodes.values().stream().map(n -> n.toNode()).toList();
}

/** Returns the accepted, pre-existing nodes that are in a state from which they can be reserved */
List<Node> reservableNodes() {
    EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
    return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
}

/** Returns the accepted nodes that were newly created by this allocation */
List<Node> newNodes() {
    return nodesFilter(n -> n.isNew);
}

/** Returns the accepted candidates matching the given predicate, as nodes */
private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
    return nodes.values().stream()
                .filter(predicate)
                .map(n -> n.toNode())
                .toList();
}

/** Returns the number of nodes accepted this far */
private int accepted() {
    if (nodeType() == NodeType.tenant) return accepted;
    // Non-tenant types: count every node of the type present in the repository
    return allNodes.nodeType(nodeType()).size();
}

/** Prefer to retire nodes we want the least */
private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
    return candidates.stream().sorted(Comparator.reverseOrder()).toList();
}

/** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
    return candidates.stream()
                     .sorted(Comparator.comparing(NodeCandidate::wantToRetire)
                                       .thenComparing(n -> n.allocation().get().membership().index()))
                     .toList();
}

/** Returns a human-readable summary of why this allocation fell short, or "" when no tracked reason applies */
public String allocationFailureDetails() {
    List<String> reasons = new ArrayList<>();
    if (rejectedDueToExclusivity > 0)
        reasons.add("host exclusivity constraints");
    if (rejectedDueToClashingParentHost > 0)
        reasons.add("insufficient nodes available on separate physical hosts");
    if (wasRetiredJustNow > 0)
        reasons.add("retirement of allocated nodes");
    if (rejectedDueToInsufficientRealResources > 0)
        reasons.add("insufficient real resources on hosts");
    if (reasons.isEmpty()) return "";

    return ": Not enough suitable nodes available due to " + String.join(", ", reasons);
}

/** Parses the numeric index out of a hostname of the form prefixNNN.suffix; throws IllegalArgumentException if absent. */
private static Integer parseIndex(String hostname) {
    try {
        return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
    }
}

/** Possible causes of node retirement */
private enum Retirement {

    alreadyRetired("node is already retired"),
    outsideRealLimits("node real resources is outside limits"),
    violatesParentHostPolicy("node violates parent host policy"),
    incompatibleResources("node resources are incompatible"),
    hardRequest("node is requested to retire"),
    softRequest("node is requested to retire (soft)"),
    violatesExclusivity("node violates host exclusivity"),
    violatesHostFlavor("node violates host flavor"),
    none("");

    private final String description;

    Retirement(String description) {
        this.description = description;
    }

    /** Human readable description of this cause */
    public String description() { return description; }

}

/** A host deficit, the number of missing hosts, for a deployment */
static class HostDeficit {

    // Resources each missing host must provide
    private final NodeResources resources;

    // Number of hosts missing
    private final int count;

    private HostDeficit(NodeResources resources, int count) {
        this.resources = resources;
        this.count = count;
    }

    NodeResources resources() { return resources; }

    int count() { return count; }

    @Override
    public String toString() { return "deficit of " + count + " nodes with " + resources; }

}

}
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
In that case it will always be false, so we can simply remove the first condition. (We could also remove the second condition later, once we are sure we want to keep this policy.)
/**
 * Returns whether allocating the candidate on this host would violate exclusivity constraints.
 *
 * Fix (per review): in the non-exclusive-allocation branch, {@code requestedNodes.isExclusive()} is
 * always false — when the request is exclusive, {@code nodeRepository.exclusiveAllocation(cluster)}
 * takes the first branch — so the redundant first condition of the inner if is removed.
 *
 * @param candidate the node candidate to check; candidates without a parent host never violate exclusivity
 * @return true if accepting this candidate would violate an exclusivity constraint
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false;

    if (nodeRepository.exclusiveAllocation(cluster)) {
        // The parent host must be able to hold the requested resources, and must not be
        // reserved for another application or another cluster type
        var parent = candidate.parent.get();
        if (!candidate.resources().isUnspecified() && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(parent.flavor()).compatibleWith(candidate.resources())) return true;
        if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
        if (parent.exclusiveToClusterType().isPresent() && !parent.exclusiveToClusterType().get().equals(cluster.type())) return true;
        return false;
    }
    else {
        // Reject the host if an exclusive node belonging to another application already occupies it
        for (Node nodeOnHost : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (nodeOnHost.allocation().isEmpty()) continue;
            if (nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
                if (!nodeOnHost.allocation().get().owner().equals(application)) return true;
            }
        }
        return false;
    }
}
else {
/**
 * Returns whether allocating the candidate on its parent host would violate exclusivity constraints.
 *
 * @param candidate the node candidate to check; candidates with no parent host never violate exclusivity
 * @return true if this candidate must be rejected due to exclusivity
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false;

    if ( ! nodeRepository.exclusiveAllocation(cluster)) {
        // Shared allocation: the host is off limits if any exclusive node of another application lives on it
        for (Node sibling : allNodes.childrenOf(candidate.parentHostname().get())) {
            boolean exclusiveSibling = sibling.allocation()
                                              .map(a -> a.membership().cluster().isExclusive())
                                              .orElse(false);
            if (exclusiveSibling && ! sibling.allocation().get().owner().equals(application)) return true;
        }
        return false;
    }

    // Exclusive allocation: the host must fit the requested resources and must not be
    // reserved for a different application or cluster type
    Node host = candidate.parent.get();
    boolean resourcesIncompatible = ! candidate.resources().isUnspecified()
                                    && ! nodeRepository.resourcesCalculator()
                                                       .advertisedResourcesOf(host.flavor())
                                                       .compatibleWith(candidate.resources());
    if (resourcesIncompatible) return true;
    if (host.exclusiveToApplicationId().map(owner -> ! owner.equals(application)).orElse(false)) return true;
    return host.exclusiveToClusterType().map(type -> ! type.equals(cluster.type())).orElse(false);
}
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
/**
 * Tracks the state of one node-allocation attempt for a single cluster of an application.
 * Candidate nodes are offered via {@link #offer(List)}; accepted candidates accumulate in
 * {@code nodes} (keyed on hostname), and {@link #finalNodes()} produces the final list with
 * retirement decisions applied.
 *
 * NOTE(review): this copy of the class contains the javadoc for violatesExclusivity(NodeCandidate)
 * but not its body, even though offer() and shouldRetire() call it — presumably elided by
 * extraction; confirm against the full file.
 */
class NodeAllocation {

    private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName());

    /** List of all nodes in node-repository */
    private final NodeList allNodes;

    /** The application this list is for */
    private final ApplicationId application;

    /** The cluster this list is for */
    private final ClusterSpec cluster;

    /** The requested nodes of this list */
    private final NodeSpec requestedNodes;

    /** The node candidates this has accepted so far, keyed on hostname */
    private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>();

    /** The number of already allocated nodes accepted and not retired */
    private int accepted = 0;

    /** The number of already allocated nodes accepted and not retired and not needing resize */
    private int acceptedWithoutResizingRetired = 0;

    /** The number of nodes rejected because of clashing parentHostname */
    private int rejectedDueToClashingParentHost = 0;

    /** The number of nodes rejected due to exclusivity constraints */
    private int rejectedDueToExclusivity = 0;

    // The number of nodes rejected because their host lacked sufficient real resources
    private int rejectedDueToInsufficientRealResources = 0;

    /** The number of nodes that just now was changed to retired */
    private int wasRetiredJustNow = 0;

    /** The node indexes to verify uniqueness of each members index */
    private final Set<Integer> indexes = new HashSet<>();

    /** The next membership index to assign to a new node */
    private final Supplier<Integer> nextIndex;

    private final NodeRepository nodeRepository;
    private final NodeResourceLimits nodeResourceLimits;

    // Host flavor required by the HOST_FLAVOR feature flag, if any; empty means no restriction
    private final Optional<String> requiredHostFlavor;

    NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
                   Supplier<Integer> nextIndex, NodeRepository nodeRepository) {
        this.allNodes = allNodes;
        this.application = application;
        this.cluster = cluster;
        this.requestedNodes = requestedNodes;
        this.nextIndex = nextIndex;
        this.nodeRepository = nodeRepository;
        this.nodeResourceLimits = new NodeResourceLimits(nodeRepository);
        // Resolve the flag for this application/cluster; a blank flag value means "no required flavor"
        this.requiredHostFlavor = Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource())
                                                                        .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                                                                        .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
                                                                        .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
                                                                        .value())
                                          .filter(s -> !s.isBlank());
    }

    /**
     * Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
     * an allocation to this cluster, or no current allocation (in which case one is assigned).
     *
     * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
     * reject allocated nodes due to index duplicates.
     *
     * @param candidates the nodes which are potentially on offer. These may belong to a different application etc.
     */
    void offer(List<NodeCandidate> candidates) {
        for (NodeCandidate candidate : candidates) {
            if (candidate.allocation().isPresent()) {
                // Already allocated: only reuse if it belongs to this application/cluster and is usable
                Allocation allocation = candidate.allocation().get();
                ClusterMembership membership = allocation.membership();
                if ( ! allocation.owner().equals(application)) continue; // wrong application
                if ( ! membership.cluster().satisfies(cluster)) continue; // wrong cluster id/type
                if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; // wrong group, and we can't or have no reason to change it
                if ( candidate.state() == Node.State.active && allocation.removable()) continue; // don't accept; causes removal
                if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; // don't accept; causes failing
                if ( indexes.contains(membership.index())) continue; // duplicate index (just to be sure)
                if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; // wrong cloud account

                boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
                boolean acceptToRetire = acceptToRetire(candidate);

                if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
                    candidate = candidate.withNode();
                    if (candidate.isValid())
                        acceptNode(candidate, shouldRetire(candidate, candidates), resizeable);
                }
            }
            else if (! saturated() && hasCompatibleResources(candidate)) {
                // Unallocated: try to allocate it to this cluster, counting each rejection reason
                if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) {
                    ++rejectedDueToInsufficientRealResources;
                    continue;
                }
                if ( violatesParentHostPolicy(candidate)) {
                    ++rejectedDueToClashingParentHost;
                    continue;
                }
                if ( violatesExclusivity(candidate)) {
                    ++rejectedDueToExclusivity;
                    continue;
                }
                if (candidate.wantToRetire()) {
                    continue;
                }
                candidate = candidate.allocate(application,
                                               ClusterMembership.from(cluster, nextIndex.get()),
                                               requestedNodes.resources().orElse(candidate.resources()),
                                               nodeRepository.clock().instant());
                if (candidate.isValid()) {
                    acceptNode(candidate, Retirement.none, false);
                }
            }
        }
    }

    /** Returns the cause of retirement for given candidate */
    private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) {
        if ( ! requestedNodes.considerRetiring()) {
            // Retirement is not being considered: only report a node that is already retired
            boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
            return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
        }
        if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
        if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
        if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
        if (candidate.wantToRetire()) return Retirement.hardRequest;
        if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
        if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity;
        if (requiredHostFlavor.isPresent() && ! candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor;
        return Retirement.none;
    }

    /** Returns whether accepting this candidate would clash with an already accepted node on the same parent host */
    private boolean violatesParentHostPolicy(NodeCandidate candidate) {
        return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate);
    }

    // The parent-host clash policy is only enforced in main/production, and never for tester instances
    private boolean checkForClashingParentHost() {
        return nodeRepository.zone().system() == SystemName.main &&
               nodeRepository.zone().environment().isProduction() &&
               ! application.instance().isTester();
    }

    /** Returns whether any already accepted node shares a parent hostname with the candidate */
    private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) {
        for (NodeCandidate acceptedNode : nodes.values()) {
            if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() &&
                acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns whether allocating the candidate on this host would violate exclusivity constraints.
     * Note that while we currently require that exclusive allocations uses the entire host,
     * this method also handles the case where smaller exclusive nodes are allocated on it.
     */
    // NOTE(review): the method this javadoc describes (violatesExclusivity) is not present in this
    // copy of the class although it is called above — confirm against the full file.

    /**
     * Returns whether this node should be accepted into the cluster even if it is not currently desired
     * (already enough nodes, or wrong resources, etc.).
     * Such nodes will be marked retired during finalization of the list of accepted nodes.
     * The conditions for this are:
     *
     * This is a stateful node. These must always be retired before being removed to allow the cluster to
     * migrate away data.
     *
     * This is a container node and it is not desired due to having the wrong flavor. In this case this
     * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
     * be used to avoid removing all the current nodes at once, before the newly allocated replacements are
     * initialized. (In the other case, where a container node is not desired because we have enough nodes we
     * do want to remove it immediately to get immediate feedback on how the size reduction works out.)
     */
    private boolean acceptToRetire(NodeCandidate candidate) {
        if (candidate.state() != Node.State.active) return false;
        if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
        if (candidate.allocation().get().membership().retired()) return true; // don't second-guess if already retired
        if (! requestedNodes.considerRetiring()) return false;

        return cluster.isStateful() ||
               (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
    }

    /** Returns whether the candidate either has the requested resources or can be resized to them */
    private boolean hasCompatibleResources(NodeCandidate candidate) {
        return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable;
    }

    /**
     * Accepts the candidate into {@code nodes}, either as an active member (possibly resized/unretired)
     * or as retired with the given cause. Returns the resulting node.
     */
    private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
        Node node = candidate.toNode();

        if (node.allocation().isPresent()) // Record the currently requested resources
            node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));

        if (retirement == Retirement.none) {
            accepted++;
            // We want to allocate new nodes rather than unretiring with resize, so count without those
            // for the purpose of deciding when to stop accepting nodes (saturation)
            if (node.allocation().isEmpty()
                || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
                acceptedWithoutResizingRetired++;

            if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) {
                node = resize(node);
            }

            if (node.state() != Node.State.active) // reactivated node - wipe state that deactivated it
                node = node.unretire().removable(false);
        }
        else {
            LOG.info("Retiring " + node + " because " + retirement.description());
            ++wasRetiredJustNow;
            node = node.retire(nodeRepository.clock().instant());
        }
        if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
            // group may be different; align the membership with the requested cluster
            node = setCluster(cluster, node);
        }
        candidate = candidate.withNode(node);
        indexes.add(node.allocation().get().membership().index());
        nodes.put(node.hostname(), candidate);
        return node;
    }

    /** Returns the node with the requested resources, keeping the disk speed, storage type and architecture of its host */
    private Node resize(Node node) {
        NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
        return node.with(new Flavor(requestedNodes.resources().get()
                                                  .with(hostResources.diskSpeed())
                                                  .with(hostResources.storageType())
                                                  .with(hostResources.architecture())),
                         Agent.application, nodeRepository.clock().instant());
    }

    /** Returns the node with its cluster membership replaced by the given cluster */
    private Node setCluster(ClusterSpec cluster, Node node) {
        ClusterMembership membership = node.allocation().get().membership().with(cluster);
        return node.with(node.allocation().get().with(membership));
    }

    /** Returns true if no more nodes are needed in this list */
    private boolean saturated() {
        return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
    }

    /** Returns true if the content of this list is sufficient to meet the request */
    boolean fulfilled() {
        return requestedNodes.fulfilledBy(accepted());
    }

    /** Returns true if this allocation was already fulfilled and resulted in no new changes */
    public boolean fulfilledAndNoChanges() {
        return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty();
    }

    /**
     * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
     *
     * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the
     *         flavor and host count required to cover the deficit.
     */
    Optional<HostDeficit> hostDeficit() {
        if (nodeType().isHost()) {
            return Optional.empty(); // Hosts are provisioned as required by the child application
        }
        return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified),
                                           requestedNodes.fulfilledDeficitCount(accepted())))
                       .filter(hostDeficit -> hostDeficit.count() > 0);
    }

    /** Returns the indices to use when provisioning hosts for this */
    List<Integer> provisionIndices(int count) {
        if (count < 1) throw new IllegalArgumentException("Count must be positive");
        NodeType hostType = requestedNodes.type().hostType();
        if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count);

        // Infrastructure hosts: find the lowest indices not already in use
        Set<Integer> currentIndices = allNodes.nodeType(hostType)
                                              .hostnames()
                                              .stream()
                                              .map(NodeAllocation::parseIndex)
                                              .collect(Collectors.toSet());
        List<Integer> indices = new ArrayList<>(count);
        for (int i = 1; indices.size() < count; i++) {
            if (!currentIndices.contains(i)) {
                indices.add(i);
            }
        }
        // Never provision a host with the same index as the current config server/controller
        // (remove(Object) removes by value here, not by position, since myIndex is an Integer)
        Integer myIndex = parseIndex(HostName.getLocalhost());
        indices.remove(myIndex);
        return indices;
    }

    /** The node type this is allocating */
    NodeType nodeType() {
        return requestedNodes.type();
    }

    /**
     * Make the number of <i>non-retired</i> nodes in the list equal to the requested number
     * of nodes, and retire the rest of the list. Only retire currently active nodes.
     * Prefer to retire nodes of the wrong flavor.
     * Make as few changes to the retired set as possible.
     *
     * @return the final list of nodes
     */
    List<Node> finalNodes() {
        int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count();
        int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
        int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);

        if (deltaRetiredCount > 0) { // retire until deltaRetiredCount is 0
            for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
                if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
                    candidate = candidate.withNode();
                    candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (--deltaRetiredCount == 0) break;
                }
            }
        }
        else if (deltaRetiredCount < 0) { // unretire until deltaRetiredCount is 0
            for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
                if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) {
                    candidate = candidate.withNode();
                    if (candidate.isResizable)
                        candidate = candidate.withNode(resize(candidate.toNode()));
                    candidate = candidate.withNode(candidate.toNode().unretire());
                    nodes.put(candidate.toNode().hostname(), candidate);
                    if (++deltaRetiredCount == 0) break;
                }
            }
        }

        // Stamp the requested exclusivity onto every accepted node's cluster membership
        for (NodeCandidate candidate : nodes.values()) {
            candidate = candidate.withNode();
            Allocation allocation = candidate.allocation().get();
            candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
                                 .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive())))));
            nodes.put(candidate.toNode().hostname(), candidate);
        }

        return nodes.values().stream().map(n -> n.toNode()).toList();
    }

    /** Returns the accepted nodes which are not new but can be reserved for this application */
    List<Node> reservableNodes() {
        // Include already reserved nodes to extend reservation period and to potentially update their cluster spec.
        EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
        return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
    }

    /** Returns the accepted nodes which were not previously allocated */
    List<Node> newNodes() {
        return nodesFilter(n -> n.isNew);
    }

    /** Returns the accepted nodes matching the given predicate */
    private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
        return nodes.values().stream()
                    .filter(predicate)
                    .map(n -> n.toNode())
                    .toList();
    }

    /** Returns the number of nodes accepted this far */
    private int accepted() {
        if (nodeType() == NodeType.tenant) return accepted;

        // Infrastructure nodes are always allocated by type. Count all nodes as accepted so that we never exceed
        // the wanted number of nodes for the type.
        return allNodes.nodeType(nodeType()).size();
    }

    /** Prefer to retire nodes we want the least */
    private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream().sorted(Comparator.reverseOrder()).toList();
    }

    /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
    private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
        return candidates.stream()
                         .sorted(Comparator.comparing(NodeCandidate::wantToRetire)
                                           .thenComparing(n -> n.allocation().get().membership().index()))
                         .toList();
    }

    /** Returns a human-readable summary of why the allocation failed, or "" if no reason was recorded */
    public String allocationFailureDetails() {
        List<String> reasons = new ArrayList<>();
        if (rejectedDueToExclusivity > 0)
            reasons.add("host exclusivity constraints");
        if (rejectedDueToClashingParentHost > 0)
            reasons.add("insufficient nodes available on separate physical hosts");
        if (wasRetiredJustNow > 0)
            reasons.add("retirement of allocated nodes");
        if (rejectedDueToInsufficientRealResources > 0)
            reasons.add("insufficient real resources on hosts");
        if (reasons.isEmpty()) return "";

        return ": Not enough suitable nodes available due to " + String.join(", ", reasons);
    }

    /** Extracts the numeric index from an infrastructure hostname, e.g. "cfg3.example.com" -> 3 */
    private static Integer parseIndex(String hostname) {
        try {
            return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1"));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e);
        }
    }

    /** Possible causes of node retirement */
    private enum Retirement {

        alreadyRetired("node is already retired"),
        outsideRealLimits("node real resources is outside limits"),
        violatesParentHostPolicy("node violates parent host policy"),
        incompatibleResources("node resources are incompatible"),
        hardRequest("node is requested to retire"),
        softRequest("node is requested to retire (soft)"),
        violatesExclusivity("node violates host exclusivity"),
        violatesHostFlavor("node violates host flavor"),
        none("");

        private final String description;

        Retirement(String description) {
            this.description = description;
        }

        /** Human readable description of this cause */
        public String description() {
            return description;
        }

    }

    /** A host deficit, the number of missing hosts, for a deployment */
    static class HostDeficit {

        private final NodeResources resources;
        private final int count;

        private HostDeficit(NodeResources resources, int count) {
            this.resources = resources;
            this.count = count;
        }

        NodeResources resources() {
            return resources;
        }

        int count() {
            return count;
        }

        @Override
        public String toString() {
            return "deficit of " + count + " nodes with " + resources;
        }

    }

}
Is there more to discuss here?
/**
 * Returns whether allocating the candidate on its parent host would violate exclusivity constraints.
 * Handles both the exclusive-allocation case (the host must effectively be reserved for this
 * application/cluster type and match the requested resources) and the shared case, where the host
 * may not mix owners when either the request or a co-located node demands exclusivity.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false; // no parent host, nothing to clash with

    if ( ! nodeRepository.exclusiveAllocation(cluster)) {
        // Shared allocation: reject if the host carries another owner's node while either
        // the request or that node's cluster demands exclusivity
        for (Node sibling : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (sibling.allocation().isEmpty()) continue;
            boolean exclusivityInPlay = requestedNodes.isExclusive()
                                        || sibling.allocation().get().membership().cluster().isExclusive();
            boolean differentOwner = ! sibling.allocation().get().owner().equals(application);
            if (exclusivityInPlay && differentOwner) return true;
        }
        return false;
    }

    // Exclusive allocation: the candidate must effectively use the entire host
    var host = candidate.parent.get();
    boolean resourcesMismatch = ! candidate.resources().isUnspecified()
                                && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(host.flavor()).compatibleWith(candidate.resources());
    if (resourcesMismatch) return true;
    if (host.exclusiveToApplicationId().map(owner -> ! owner.equals(application)).orElse(false)) return true;
    return host.exclusiveToClusterType().map(type -> ! type.equals(cluster.type())).orElse(false);
}
if (parent.exclusiveToApplicationId().isPresent() && !parent.exclusiveToApplicationId().get().equals(application)) return true;
/**
 * Returns whether allocating the candidate on its parent host would violate exclusivity constraints.
 *
 * NOTE(review): unlike the other copy of this method in this file, this version does not consider
 * requestedNodes.isExclusive() in the shared-allocation branch — only the co-located node's own
 * cluster exclusivity. Confirm this asymmetry is intended.
 */
private boolean violatesExclusivity(NodeCandidate candidate) {
    if (candidate.parent.isEmpty()) return false; // no parent host, nothing to clash with

    if ( ! nodeRepository.exclusiveAllocation(cluster)) {
        // Shared allocation: a host is off limits if it already carries an exclusive node of another owner
        for (Node sibling : allNodes.childrenOf(candidate.parentHostname().get())) {
            if (sibling.allocation().isEmpty()) continue;
            if ( ! sibling.allocation().get().membership().cluster().isExclusive()) continue;
            if ( ! sibling.allocation().get().owner().equals(application)) return true;
        }
        return false;
    }

    // Exclusive allocation: the candidate must effectively use the entire host
    var host = candidate.parent.get();
    if ( ! candidate.resources().isUnspecified()
         && ! nodeRepository.resourcesCalculator().advertisedResourcesOf(host.flavor()).compatibleWith(candidate.resources()))
        return true;
    if (host.exclusiveToApplicationId().filter(owner -> ! owner.equals(application)).isPresent()) return true;
    return host.exclusiveToClusterType().filter(type -> ! type.equals(cluster.type())).isPresent();
}
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
class NodeAllocation { private static final Logger LOG = Logger.getLogger(NodeAllocation.class.getName()); /** List of all nodes in node-repository */ private final NodeList allNodes; /** The application this list is for */ private final ApplicationId application; /** The cluster this list is for */ private final ClusterSpec cluster; /** The requested nodes of this list */ private final NodeSpec requestedNodes; /** The node candidates this has accepted so far, keyed on hostname */ private final Map<String, NodeCandidate> nodes = new LinkedHashMap<>(); /** The number of already allocated nodes accepted and not retired */ private int accepted = 0; /** The number of already allocated nodes accepted and not retired and not needing resize */ private int acceptedWithoutResizingRetired = 0; /** The number of nodes rejected because of clashing parentHostname */ private int rejectedDueToClashingParentHost = 0; /** The number of nodes rejected due to exclusivity constraints */ private int rejectedDueToExclusivity = 0; private int rejectedDueToInsufficientRealResources = 0; /** The number of nodes that just now was changed to retired */ private int wasRetiredJustNow = 0; /** The node indexes to verify uniqueness of each members index */ private final Set<Integer> indexes = new HashSet<>(); /** The next membership index to assign to a new node */ private final Supplier<Integer> nextIndex; private final NodeRepository nodeRepository; private final NodeResourceLimits nodeResourceLimits; private final Optional<String> requiredHostFlavor; NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, Supplier<Integer> nextIndex, NodeRepository nodeRepository) { this.allNodes = allNodes; this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; this.nextIndex = nextIndex; this.nodeRepository = nodeRepository; this.nodeResourceLimits = new NodeResourceLimits(nodeRepository); this.requiredHostFlavor = 
Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name()) .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value()) .value()) .filter(s -> !s.isBlank()); } /** * Offer some nodes to this. The nodes may have an allocation to a different application or cluster, * an allocation to this cluster, or no current allocation (in which case one is assigned). * * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. */ void offer(List<NodeCandidate> candidates) { for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); if ( ! allocation.owner().equals(application)) continue; if ( ! membership.cluster().satisfies(cluster)) continue; if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; if ( candidate.state() == Node.State.active && allocation.removable()) continue; if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; if ( indexes.contains(membership.index())) continue; if ( candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable; boolean acceptToRetire = acceptToRetire(candidate); if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) acceptNode(candidate, shouldRetire(candidate, candidates), resizeable); } } else if (! 
saturated() && hasCompatibleResources(candidate)) { if (! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) { ++rejectedDueToInsufficientRealResources; continue; } if ( violatesParentHostPolicy(candidate)) { ++rejectedDueToClashingParentHost; continue; } if ( violatesExclusivity(candidate)) { ++rejectedDueToExclusivity; continue; } if (candidate.wantToRetire()) { continue; } candidate = candidate.allocate(application, ClusterMembership.from(cluster, nextIndex.get()), requestedNodes.resources().orElse(candidate.resources()), nodeRepository.clock().instant()); if (candidate.isValid()) { acceptNode(candidate, Retirement.none, false); } } } } /** Returns the cause of retirement for given candidate */ private Retirement shouldRetire(NodeCandidate candidate, List<NodeCandidate> candidates) { if ( ! requestedNodes.considerRetiring()) { boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false); return alreadyRetired ? Retirement.alreadyRetired : Retirement.none; } if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits; if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy; if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources; if (candidate.wantToRetire()) return Retirement.hardRequest; if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest; if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity; if (requiredHostFlavor.isPresent() && ! 
candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor; return Retirement.none; } private boolean violatesParentHostPolicy(NodeCandidate candidate) { return checkForClashingParentHost() && offeredNodeHasParentHostnameAlreadyAccepted(candidate); } private boolean checkForClashingParentHost() { return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction() && ! application.instance().isTester(); } private boolean offeredNodeHasParentHostnameAlreadyAccepted(NodeCandidate candidate) { for (NodeCandidate acceptedNode : nodes.values()) { if (acceptedNode.parentHostname().isPresent() && candidate.parentHostname().isPresent() && acceptedNode.parentHostname().get().equals(candidate.parentHostname().get())) { return true; } } return false; } /** * Returns whether allocating the candidate on this host would violate exclusivity constraints. * Note that while we currently require that exclusive allocations uses the entire host, * this method also handles the case where smaller exclusive nodes are allocated on it. */ /** * Returns whether this node should be accepted into the cluster even if it is not currently desired * (already enough nodes, or wrong resources, etc.). * Such nodes will be marked retired during finalization of the list of accepted nodes. * The conditions for this are: * * This is a stateful node. These must always be retired before being removed to allow the cluster to * migrate away data. * * This is a container node and it is not desired due to having the wrong flavor. In this case this * will (normally) obtain for all the current nodes in the cluster and so retiring before removing must * be used to avoid removing all the current nodes at once, before the newly allocated replacements are * initialized. 
(In the other case, where a container node is not desired because we have enough nodes we * do want to remove it immediately to get immediate feedback on how the size reduction works out.) */ private boolean acceptToRetire(NodeCandidate candidate) { if (candidate.state() != Node.State.active) return false; if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false; if (candidate.allocation().get().membership().retired()) return true; if (! requestedNodes.considerRetiring()) return false; return cluster.isStateful() || (cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate)); } private boolean hasCompatibleResources(NodeCandidate candidate) { return requestedNodes.isCompatible(candidate.resources()) || candidate.isResizable; } private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) { Node node = candidate.toNode(); if (node.allocation().isPresent()) node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources()))); if (retirement == Retirement.none) { accepted++; if (node.allocation().isEmpty() || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired())) acceptedWithoutResizingRetired++; if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) { node = resize(node); } if (node.state() != Node.State.active) node = node.unretire().removable(false); } else { LOG.info("Retiring " + node + " because " + retirement.description()); ++wasRetiredJustNow; node = node.retire(nodeRepository.clock().instant()); } if ( ! 
node.allocation().get().membership().cluster().equals(cluster)) { node = setCluster(cluster, node); } candidate = candidate.withNode(node); indexes.add(node.allocation().get().membership().index()); nodes.put(node.hostname(), candidate); return node; } private Node resize(Node node) { NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources(); return node.with(new Flavor(requestedNodes.resources().get() .with(hostResources.diskSpeed()) .with(hostResources.storageType()) .with(hostResources.architecture())), Agent.application, nodeRepository.clock().instant()); } private Node setCluster(ClusterSpec cluster, Node node) { ClusterMembership membership = node.allocation().get().membership().with(cluster); return node.with(node.allocation().get().with(membership)); } /** Returns true if no more nodes are needed in this list */ private boolean saturated() { return requestedNodes.saturatedBy(acceptedWithoutResizingRetired); } /** Returns true if the content of this list is sufficient to meet the request */ boolean fulfilled() { return requestedNodes.fulfilledBy(accepted()); } /** Returns true if this allocation was already fulfilled and resulted in no new changes */ public boolean fulfilledAndNoChanges() { return fulfilled() && reservableNodes().isEmpty() && newNodes().isEmpty(); } /** * Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}. * * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the * flavor and host count required to cover the deficit. 
*/ Optional<HostDeficit> hostDeficit() { if (nodeType().isHost()) { return Optional.empty(); } return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified), requestedNodes.fulfilledDeficitCount(accepted()))) .filter(hostDeficit -> hostDeficit.count() > 0); } /** Returns the indices to use when provisioning hosts for this */ List<Integer> provisionIndices(int count) { if (count < 1) throw new IllegalArgumentException("Count must be positive"); NodeType hostType = requestedNodes.type().hostType(); if (hostType == NodeType.host) return nodeRepository.database().readProvisionIndices(count); Set<Integer> currentIndices = allNodes.nodeType(hostType) .hostnames() .stream() .map(NodeAllocation::parseIndex) .collect(Collectors.toSet()); List<Integer> indices = new ArrayList<>(count); for (int i = 1; indices.size() < count; i++) { if (!currentIndices.contains(i)) { indices.add(i); } } Integer myIndex = parseIndex(HostName.getLocalhost()); indices.remove(myIndex); return indices; } /** The node type this is allocating */ NodeType nodeType() { return requestedNodes.type(); } /** * Make the number of <i>non-retired</i> nodes in the list equal to the requested number * of nodes, and retire the rest of the list. Only retire currently active nodes. * Prefer to retire nodes of the wrong flavor. * Make as few changes to the retired set as possible. * * @return the final list of nodes */ List<Node> finalNodes() { int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count(); int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count(); int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount); if (deltaRetiredCount > 0) { for (NodeCandidate candidate : byRetiringPriority(nodes.values())) { if ( ! 
candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) { candidate = candidate.withNode(); candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant())); nodes.put(candidate.toNode().hostname(), candidate); if (--deltaRetiredCount == 0) break; } } } else if (deltaRetiredCount < 0) { for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) { if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) { candidate = candidate.withNode(); if (candidate.isResizable) candidate = candidate.withNode(resize(candidate.toNode())); candidate = candidate.withNode(candidate.toNode().unretire()); nodes.put(candidate.toNode().hostname(), candidate); if (++deltaRetiredCount == 0) break; } } } for (NodeCandidate candidate : nodes.values()) { candidate = candidate.withNode(); Allocation allocation = candidate.allocation().get(); candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership() .with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))))); nodes.put(candidate.toNode().hostname(), candidate); } return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved); return nodesFilter(n -> ! 
n.isNew && reservableStates.contains(n.state())); } List<Node> newNodes() { return nodesFilter(n -> n.isNew); } private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) .toList(); } /** Returns the number of nodes accepted this far */ private int accepted() { if (nodeType() == NodeType.tenant) return accepted; return allNodes.nodeType(nodeType()).size(); } /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) .toList(); } public String allocationFailureDetails() { List<String> reasons = new ArrayList<>(); if (rejectedDueToExclusivity > 0) reasons.add("host exclusivity constraints"); if (rejectedDueToClashingParentHost > 0) reasons.add("insufficient nodes available on separate physical hosts"); if (wasRetiredJustNow > 0) reasons.add("retirement of allocated nodes"); if (rejectedDueToInsufficientRealResources > 0) reasons.add("insufficient real resources on hosts"); if (reasons.isEmpty()) return ""; return ": Not enough suitable nodes available due to " + String.join(", ", reasons); } private static Integer parseIndex(String hostname) { try { return Integer.parseInt(hostname.replaceFirst("^\\D+(\\d+)\\..*", "$1")); } catch (NumberFormatException e) { throw new IllegalArgumentException("Could not parse index from hostname '" + hostname + "'", e); } } /** Possible causes of node retirement */ private enum Retirement { alreadyRetired("node is already retired"), outsideRealLimits("node real resources is outside 
limits"), violatesParentHostPolicy("node violates parent host policy"), incompatibleResources("node resources are incompatible"), hardRequest("node is requested to retire"), softRequest("node is requested to retire (soft)"), violatesExclusivity("node violates host exclusivity"), violatesHostFlavor("node violates host flavor"), none(""); private final String description; Retirement(String description) { this.description = description; } /** Human readable description of this cause */ public String description() { return description; } } /** A host deficit, the number of missing hosts, for a deployment */ static class HostDeficit { private final NodeResources resources; private final int count; private HostDeficit(NodeResources resources, int count) { this.resources = resources; this.count = count; } NodeResources resources() { return resources; } int count() { return count; } @Override public String toString() { return "deficit of " + count + " nodes with " + resources; } } }
this could also return NixValue.invalid as it will never be called
public Value add(Value value, int used) { return add(value.asLong(), used); }
return add(value.asLong(), used);
public Value add(Value value, int used) { return NixValue.invalid(); }
class EmptyImpl implements Impl { public void prepareFor(ArrayValue self, Type type) { if (type == Type.LONG) { self.impl = new LongImpl(); } else if (type == Type.DOUBLE) { self.impl = new DoubleImpl(); } else { self.impl = new GenericImpl(this, 0); } } public Value add(long value, int used) { return NixValue.invalid(); } public Value add(double value, int used) { return NixValue.invalid(); } public Value get(int index) { return NixValue.invalid(); } }
class EmptyImpl implements Impl { public void prepareFor(ArrayValue self, Type type) { if (type == Type.LONG) { self.impl = new LongImpl(); } else if (type == Type.DOUBLE) { self.impl = new DoubleImpl(); } else { self.impl = new GenericImpl(this, 0); } } public Value add(long value, int used) { return NixValue.invalid(); } public Value add(double value, int used) { return NixValue.invalid(); } public Value get(int index) { return NixValue.invalid(); } }
```suggestion throw new IllegalArgumentException("Invalid " + schema, e); ```
public DerivedConfiguration(Schema schema, DeployState deployState) { try { Validator.ensureNotNull("Schema", schema); this.schema = schema; this.queryProfiles = deployState.getQueryProfiles().getRegistry(); this.maxUncommittedMemory = deployState.getProperties().featureFlags().maxUnCommittedMemory(); if (!schema.isDocumentsOnly()) { streamingFields = new VsmFields(schema); streamingSummary = new VsmSummary(schema); } if (!schema.isDocumentsOnly()) { attributeFields = new AttributeFields(schema); summaries = new Summaries(schema, deployState.getDeployLogger(), deployState.getProperties().featureFlags()); juniperrc = new Juniperrc(schema); rankProfileList = new RankProfileList(schema, schema.rankExpressionFiles(), attributeFields, deployState); indexingScript = new IndexingScript(schema); indexInfo = new IndexInfo(schema); schemaInfo = new SchemaInfo(schema, deployState.rankProfileRegistry(), summaries); indexSchema = new IndexSchema(schema); importedFields = new ImportedFields(schema); } Validation.validate(this, schema); } catch (IllegalArgumentException|IllegalStateException e) { throw new IllegalArgumentException("Invalid " + schema + " -> " + e.getMessage(), e); } }
throw new IllegalArgumentException("Invalid " + schema + " -> " + e.getMessage(), e);
public DerivedConfiguration(Schema schema, DeployState deployState) { try { Validator.ensureNotNull("Schema", schema); this.schema = schema; this.queryProfiles = deployState.getQueryProfiles().getRegistry(); this.maxUncommittedMemory = deployState.getProperties().featureFlags().maxUnCommittedMemory(); if (!schema.isDocumentsOnly()) { streamingFields = new VsmFields(schema); streamingSummary = new VsmSummary(schema); } if (!schema.isDocumentsOnly()) { attributeFields = new AttributeFields(schema); summaries = new Summaries(schema, deployState.getDeployLogger(), deployState.getProperties().featureFlags()); juniperrc = new Juniperrc(schema); rankProfileList = new RankProfileList(schema, schema.rankExpressionFiles(), attributeFields, deployState); indexingScript = new IndexingScript(schema); indexInfo = new IndexInfo(schema); schemaInfo = new SchemaInfo(schema, deployState.rankProfileRegistry(), summaries); indexSchema = new IndexSchema(schema); importedFields = new ImportedFields(schema); } Validation.validate(this, schema); } catch (IllegalArgumentException|IllegalStateException e) { throw new IllegalArgumentException("Invalid " + schema, e); } }
class DerivedConfiguration implements AttributesConfig.Producer { private final Schema schema; private Summaries summaries; private Juniperrc juniperrc; private AttributeFields attributeFields; private RankProfileList rankProfileList; private IndexingScript indexingScript; private IndexInfo indexInfo; private SchemaInfo schemaInfo; private VsmFields streamingFields; private VsmSummary streamingSummary; private IndexSchema indexSchema; private ImportedFields importedFields; private final QueryProfileRegistry queryProfiles; private final long maxUncommittedMemory; /** * Creates a complete derived configuration from a search definition. * Only used in tests. * * @param schema the search to derive a configuration from. Derived objects will be snapshots, but this argument is * live. Which means that this object will be inconsistent when the given search definition is later * modified. * @param rankProfileRegistry a {@link com.yahoo.schema.RankProfileRegistry} */ public DerivedConfiguration(Schema schema, RankProfileRegistry rankProfileRegistry) { this(schema, rankProfileRegistry, new QueryProfileRegistry()); } DerivedConfiguration(Schema schema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfiles) { this(schema, new DeployState.Builder().rankProfileRegistry(rankProfileRegistry).queryProfiles(queryProfiles).build()); } /** * Creates a complete derived configuration snapshot from a schema. * * @param schema the schema to derive a configuration from. Derived objects will be snapshots, but this * argument is live. Which means that this object will be inconsistent if the given * schema is later modified. */ /** * Exports a complete set of configuration-server format config files. 
* * @param toDirectory the directory to export to, current dir if null * @throws IOException if exporting fails, some files may still be created */ public void export(String toDirectory) throws IOException { if (!schema.isDocumentsOnly()) { summaries.export(toDirectory); juniperrc.export(toDirectory); attributeFields.export(toDirectory); streamingFields.export(toDirectory); streamingSummary.export(toDirectory); indexSchema.export(toDirectory); rankProfileList.export(toDirectory); indexingScript.export(toDirectory); indexInfo.export(toDirectory); importedFields.export(toDirectory); schemaInfo.export(toDirectory); } } public static void exportDocuments(DocumentmanagerConfig.Builder documentManagerCfg, String toDirectory) throws IOException { exportCfg(new DocumentmanagerConfig(documentManagerCfg), toDirectory + "/" + "documentmanager.cfg"); } public static void exportDocuments(DocumenttypesConfig.Builder documentTypesCfg, String toDirectory) throws IOException { exportCfg(new DocumenttypesConfig(documentTypesCfg), toDirectory + "/" + "documenttypes.cfg"); } public static void exportQueryProfiles(QueryProfileRegistry queryProfileRegistry, String toDirectory) throws IOException { exportCfg(new QueryProfiles(queryProfileRegistry, (level, message) -> {}).getConfig(), toDirectory + "/" + "query-profiles.cfg"); } public void exportConstants(String toDirectory) throws IOException { RankingConstantsConfig.Builder b = new RankingConstantsConfig.Builder(); rankProfileList.getConfig(b); exportCfg(b.build(), toDirectory + "/" + "ranking-constants.cfg"); } private static void exportCfg(ConfigInstance instance, String fileName) throws IOException { Writer writer = null; try { writer = IOUtils.createWriter(fileName, false); writer.write(instance.toString()); writer.write("\n"); } finally { if (writer != null) { IOUtils.closeWriter(writer); } } } public Summaries getSummaries() { return summaries; } public AttributeFields getAttributeFields() { return attributeFields; } @Override 
public void getConfig(AttributesConfig.Builder builder) { getConfig(builder, AttributeFields.FieldSet.ALL); } public void getConfig(AttributesConfig.Builder builder, AttributeFields.FieldSet fs) { attributeFields.getConfig(builder, fs, maxUncommittedMemory); } public IndexingScript getIndexingScript() { return indexingScript; } public IndexInfo getIndexInfo() { return indexInfo; } public SchemaInfo getSchemaInfo() { return schemaInfo; } public void setIndexingScript(IndexingScript script) { this.indexingScript = script; } public Schema getSchema() { return schema; } public RankProfileList getRankProfileList() { return rankProfileList; } public VsmSummary getVsmSummary() { return streamingSummary; } public VsmFields getVsmFields() { return streamingFields; } public IndexSchema getIndexSchema() { return indexSchema; } public Juniperrc getJuniperrc() { return juniperrc; } public ImportedFields getImportedFields() { return importedFields; } public QueryProfileRegistry getQueryProfiles() { return queryProfiles; } }
class DerivedConfiguration implements AttributesConfig.Producer { private final Schema schema; private Summaries summaries; private Juniperrc juniperrc; private AttributeFields attributeFields; private RankProfileList rankProfileList; private IndexingScript indexingScript; private IndexInfo indexInfo; private SchemaInfo schemaInfo; private VsmFields streamingFields; private VsmSummary streamingSummary; private IndexSchema indexSchema; private ImportedFields importedFields; private final QueryProfileRegistry queryProfiles; private final long maxUncommittedMemory; /** * Creates a complete derived configuration from a search definition. * Only used in tests. * * @param schema the search to derive a configuration from. Derived objects will be snapshots, but this argument is * live. Which means that this object will be inconsistent when the given search definition is later * modified. * @param rankProfileRegistry a {@link com.yahoo.schema.RankProfileRegistry} */ public DerivedConfiguration(Schema schema, RankProfileRegistry rankProfileRegistry) { this(schema, rankProfileRegistry, new QueryProfileRegistry()); } DerivedConfiguration(Schema schema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfiles) { this(schema, new DeployState.Builder().rankProfileRegistry(rankProfileRegistry).queryProfiles(queryProfiles).build()); } /** * Creates a complete derived configuration snapshot from a schema. * * @param schema the schema to derive a configuration from. Derived objects will be snapshots, but this * argument is live. Which means that this object will be inconsistent if the given * schema is later modified. */ /** * Exports a complete set of configuration-server format config files. 
* * @param toDirectory the directory to export to, current dir if null * @throws IOException if exporting fails, some files may still be created */ public void export(String toDirectory) throws IOException { if (!schema.isDocumentsOnly()) { summaries.export(toDirectory); juniperrc.export(toDirectory); attributeFields.export(toDirectory); streamingFields.export(toDirectory); streamingSummary.export(toDirectory); indexSchema.export(toDirectory); rankProfileList.export(toDirectory); indexingScript.export(toDirectory); indexInfo.export(toDirectory); importedFields.export(toDirectory); schemaInfo.export(toDirectory); } } public static void exportDocuments(DocumentmanagerConfig.Builder documentManagerCfg, String toDirectory) throws IOException { exportCfg(new DocumentmanagerConfig(documentManagerCfg), toDirectory + "/" + "documentmanager.cfg"); } public static void exportDocuments(DocumenttypesConfig.Builder documentTypesCfg, String toDirectory) throws IOException { exportCfg(new DocumenttypesConfig(documentTypesCfg), toDirectory + "/" + "documenttypes.cfg"); } public static void exportQueryProfiles(QueryProfileRegistry queryProfileRegistry, String toDirectory) throws IOException { exportCfg(new QueryProfiles(queryProfileRegistry, (level, message) -> {}).getConfig(), toDirectory + "/" + "query-profiles.cfg"); } public void exportConstants(String toDirectory) throws IOException { RankingConstantsConfig.Builder b = new RankingConstantsConfig.Builder(); rankProfileList.getConfig(b); exportCfg(b.build(), toDirectory + "/" + "ranking-constants.cfg"); } private static void exportCfg(ConfigInstance instance, String fileName) throws IOException { Writer writer = null; try { writer = IOUtils.createWriter(fileName, false); writer.write(instance.toString()); writer.write("\n"); } finally { if (writer != null) { IOUtils.closeWriter(writer); } } } public Summaries getSummaries() { return summaries; } public AttributeFields getAttributeFields() { return attributeFields; } @Override 
public void getConfig(AttributesConfig.Builder builder) { getConfig(builder, AttributeFields.FieldSet.ALL); } public void getConfig(AttributesConfig.Builder builder, AttributeFields.FieldSet fs) { attributeFields.getConfig(builder, fs, maxUncommittedMemory); } public IndexingScript getIndexingScript() { return indexingScript; } public IndexInfo getIndexInfo() { return indexInfo; } public SchemaInfo getSchemaInfo() { return schemaInfo; } public void setIndexingScript(IndexingScript script) { this.indexingScript = script; } public Schema getSchema() { return schema; } public RankProfileList getRankProfileList() { return rankProfileList; } public VsmSummary getVsmSummary() { return streamingSummary; } public VsmFields getVsmFields() { return streamingFields; } public IndexSchema getIndexSchema() { return indexSchema; } public Juniperrc getJuniperrc() { return juniperrc; } public ImportedFields getImportedFields() { return importedFields; } public QueryProfileRegistry getQueryProfiles() { return queryProfiles; } }
this could also return NixValue.invalid as it will never be called
public Value add(Value value, int used) { return add(value.asDouble(), used); }
return add(value.asDouble(), used);
public Value add(Value value, int used) { return NixValue.invalid(); }
class EmptyImpl implements Impl { public void prepareFor(ArrayValue self, Type type) { if (type == Type.LONG) { self.impl = new LongImpl(); } else if (type == Type.DOUBLE) { self.impl = new DoubleImpl(); } else { self.impl = new GenericImpl(this, 0); } } public Value add(long value, int used) { return NixValue.invalid(); } public Value add(double value, int used) { return NixValue.invalid(); } public Value get(int index) { return NixValue.invalid(); } }
class EmptyImpl implements Impl { public void prepareFor(ArrayValue self, Type type) { if (type == Type.LONG) { self.impl = new LongImpl(); } else if (type == Type.DOUBLE) { self.impl = new DoubleImpl(); } else { self.impl = new GenericImpl(this, 0); } } public Value add(long value, int used) { return NixValue.invalid(); } public Value add(double value, int used) { return NixValue.invalid(); } public Value get(int index) { return NixValue.invalid(); } }
Fixed
public Value add(Value value, int used) { return add(value.asLong(), used); }
return add(value.asLong(), used);
public Value add(Value value, int used) { return NixValue.invalid(); }
class EmptyImpl implements Impl { public void prepareFor(ArrayValue self, Type type) { if (type == Type.LONG) { self.impl = new LongImpl(); } else if (type == Type.DOUBLE) { self.impl = new DoubleImpl(); } else { self.impl = new GenericImpl(this, 0); } } public Value add(long value, int used) { return NixValue.invalid(); } public Value add(double value, int used) { return NixValue.invalid(); } public Value get(int index) { return NixValue.invalid(); } }
class EmptyImpl implements Impl { public void prepareFor(ArrayValue self, Type type) { if (type == Type.LONG) { self.impl = new LongImpl(); } else if (type == Type.DOUBLE) { self.impl = new DoubleImpl(); } else { self.impl = new GenericImpl(this, 0); } } public Value add(long value, int used) { return NixValue.invalid(); } public Value add(double value, int used) { return NixValue.invalid(); } public Value get(int index) { return NixValue.invalid(); } }
Fixed
public Value add(Value value, int used) { return add(value.asDouble(), used); }
return add(value.asDouble(), used);
public Value add(Value value, int used) { return NixValue.invalid(); }
class EmptyImpl implements Impl { public void prepareFor(ArrayValue self, Type type) { if (type == Type.LONG) { self.impl = new LongImpl(); } else if (type == Type.DOUBLE) { self.impl = new DoubleImpl(); } else { self.impl = new GenericImpl(this, 0); } } public Value add(long value, int used) { return NixValue.invalid(); } public Value add(double value, int used) { return NixValue.invalid(); } public Value get(int index) { return NixValue.invalid(); } }
class EmptyImpl implements Impl { public void prepareFor(ArrayValue self, Type type) { if (type == Type.LONG) { self.impl = new LongImpl(); } else if (type == Type.DOUBLE) { self.impl = new DoubleImpl(); } else { self.impl = new GenericImpl(this, 0); } } public Value add(long value, int used) { return NixValue.invalid(); } public Value add(double value, int used) { return NixValue.invalid(); } public Value get(int index) { return NixValue.invalid(); } }
Should this be IllegalArgumentException too, it is error in the input ?
public StringBuilder toString(StringBuilder string, SerializationContext context, Deque<String> path, CompositeNode parent) { if (reference.isIdentifier() && context.getBinding(getName()) != null) { return string.append(context.getBinding(getName())); } ExpressionFunction function = context.getFunction(getName()); if (function != null && function.arguments().size() == getArguments().size() && getOutput() == null) { if (path == null) path = new ArrayDeque<>(); String myPath = getName() + getArguments().expressions(); if (path.contains(myPath)) throw new IllegalStateException("Cycle in ranking expression function '" + getName() + "' called from: " + path); path.addLast(myPath); String functionName = getName(); boolean needSerialization = (getArguments().size() > 0) || context.needSerialization(functionName); if ( needSerialization ) { ExpressionFunction.Instance instance = function.expand(context, getArguments().expressions(), path); functionName = instance.getName(); context.addFunctionSerialization(RankingExpression.propertyName(functionName), instance.getExpressionString()); for (Map.Entry<String, TensorType> argumentType : function.argumentTypes().entrySet()) context.addArgumentTypeSerialization(functionName, argumentType.getKey(), argumentType.getValue()); if (function.returnType().isPresent()) context.addFunctionTypeSerialization(functionName, function.returnType().get()); } path.removeLast(); return string.append(wrapInRankingExpression(functionName)); } return reference.toString(string, context, path, parent); }
throw new IllegalStateException("Cycle in ranking expression function '" + getName() + "' called from: " + path);
public StringBuilder toString(StringBuilder string, SerializationContext context, Deque<String> path, CompositeNode parent) { if (reference.isIdentifier() && context.getBinding(getName()) != null) { return string.append(context.getBinding(getName())); } ExpressionFunction function = context.getFunction(getName()); if (function != null && function.arguments().size() == getArguments().size() && getOutput() == null) { if (path == null) path = new ArrayDeque<>(); String myPath = getName() + getArguments().expressions(); if (path.contains(myPath)) throw new IllegalArgumentException("Cycle in ranking expression function '" + getName() + "' called from: " + path); path.addLast(myPath); String functionName = getName(); boolean needSerialization = (getArguments().size() > 0) || context.needSerialization(functionName); if ( needSerialization ) { ExpressionFunction.Instance instance = function.expand(context, getArguments().expressions(), path); functionName = instance.getName(); context.addFunctionSerialization(RankingExpression.propertyName(functionName), instance.getExpressionString()); for (Map.Entry<String, TensorType> argumentType : function.argumentTypes().entrySet()) context.addArgumentTypeSerialization(functionName, argumentType.getKey(), argumentType.getValue()); if (function.returnType().isPresent()) context.addFunctionTypeSerialization(functionName, function.returnType().get()); } path.removeLast(); return string.append(wrapInRankingExpression(functionName)); } return reference.toString(string, context, path, parent); }
class ReferenceNode extends CompositeNode { private final Reference reference; /* Parses this string into a reference */ public ReferenceNode(String name) { this.reference = Reference.simple(name).orElseGet(() -> Reference.fromIdentifier(name)); } public ReferenceNode(String name, List<? extends ExpressionNode> arguments, String output) { this.reference = new Reference(name, arguments != null ? new Arguments(arguments) : Arguments.EMPTY, output); } public ReferenceNode(Reference reference) { this.reference = reference; } public String getName() { return reference.name(); } /** Returns the arguments, never null */ public Arguments getArguments() { return reference.arguments(); } /** Returns a copy of this where the arguments are replaced by the given arguments */ public ReferenceNode setArguments(List<ExpressionNode> arguments) { return new ReferenceNode(reference.withArguments(new Arguments(arguments))); } /** Returns the specific output this references, or null if none specified */ public String getOutput() { return reference.output(); } /** Returns a copy of this node with a modified output */ public ReferenceNode setOutput(String output) { return new ReferenceNode(reference.withOutput(output)); } /** Returns an empty list as this has no children */ @Override public List<ExpressionNode> children() { return reference.arguments().expressions(); } @Override /** Returns the reference of this node */ public Reference reference() { return reference; } @Override public TensorType type(TypeContext<Reference> context) { TensorType type = null; try { type = context.getType(reference); } catch (IllegalArgumentException e) { throw new IllegalArgumentException(reference + " is invalid", e); } if (type == null) throw new IllegalArgumentException("Unknown feature '" + this + "'"); return type; } @Override public Value evaluate(Context context) { if (reference.isIdentifier()) return context.get(reference.name()); else return context.get(getName(), getArguments(), getOutput()); } 
@Override public CompositeNode setChildren(List<ExpressionNode> newChildren) { return setArguments(newChildren); } @Override public int hashCode() { return reference.hashCode(); } }
class ReferenceNode extends CompositeNode { private final Reference reference; /* Parses this string into a reference */ public ReferenceNode(String name) { this.reference = Reference.simple(name).orElseGet(() -> Reference.fromIdentifier(name)); } public ReferenceNode(String name, List<? extends ExpressionNode> arguments, String output) { this.reference = new Reference(name, arguments != null ? new Arguments(arguments) : Arguments.EMPTY, output); } public ReferenceNode(Reference reference) { this.reference = reference; } public String getName() { return reference.name(); } /** Returns the arguments, never null */ public Arguments getArguments() { return reference.arguments(); } /** Returns a copy of this where the arguments are replaced by the given arguments */ public ReferenceNode setArguments(List<ExpressionNode> arguments) { return new ReferenceNode(reference.withArguments(new Arguments(arguments))); } /** Returns the specific output this references, or null if none specified */ public String getOutput() { return reference.output(); } /** Returns a copy of this node with a modified output */ public ReferenceNode setOutput(String output) { return new ReferenceNode(reference.withOutput(output)); } /** Returns an empty list as this has no children */ @Override public List<ExpressionNode> children() { return reference.arguments().expressions(); } @Override /** Returns the reference of this node */ public Reference reference() { return reference; } @Override public TensorType type(TypeContext<Reference> context) { TensorType type = null; try { type = context.getType(reference); } catch (IllegalArgumentException e) { throw new IllegalArgumentException(reference + " is invalid", e); } if (type == null) throw new IllegalArgumentException("Unknown feature '" + this + "'"); return type; } @Override public Value evaluate(Context context) { if (reference.isIdentifier()) return context.get(reference.name()); else return context.get(getName(), getArguments(), getOutput()); } 
@Override public CompositeNode setChildren(List<ExpressionNode> newChildren) { return setArguments(newChildren); } @Override public int hashCode() { return reference.hashCode(); } }
Yes.
public StringBuilder toString(StringBuilder string, SerializationContext context, Deque<String> path, CompositeNode parent) { if (reference.isIdentifier() && context.getBinding(getName()) != null) { return string.append(context.getBinding(getName())); } ExpressionFunction function = context.getFunction(getName()); if (function != null && function.arguments().size() == getArguments().size() && getOutput() == null) { if (path == null) path = new ArrayDeque<>(); String myPath = getName() + getArguments().expressions(); if (path.contains(myPath)) throw new IllegalStateException("Cycle in ranking expression function '" + getName() + "' called from: " + path); path.addLast(myPath); String functionName = getName(); boolean needSerialization = (getArguments().size() > 0) || context.needSerialization(functionName); if ( needSerialization ) { ExpressionFunction.Instance instance = function.expand(context, getArguments().expressions(), path); functionName = instance.getName(); context.addFunctionSerialization(RankingExpression.propertyName(functionName), instance.getExpressionString()); for (Map.Entry<String, TensorType> argumentType : function.argumentTypes().entrySet()) context.addArgumentTypeSerialization(functionName, argumentType.getKey(), argumentType.getValue()); if (function.returnType().isPresent()) context.addFunctionTypeSerialization(functionName, function.returnType().get()); } path.removeLast(); return string.append(wrapInRankingExpression(functionName)); } return reference.toString(string, context, path, parent); }
throw new IllegalStateException("Cycle in ranking expression function '" + getName() + "' called from: " + path);
public StringBuilder toString(StringBuilder string, SerializationContext context, Deque<String> path, CompositeNode parent) { if (reference.isIdentifier() && context.getBinding(getName()) != null) { return string.append(context.getBinding(getName())); } ExpressionFunction function = context.getFunction(getName()); if (function != null && function.arguments().size() == getArguments().size() && getOutput() == null) { if (path == null) path = new ArrayDeque<>(); String myPath = getName() + getArguments().expressions(); if (path.contains(myPath)) throw new IllegalArgumentException("Cycle in ranking expression function '" + getName() + "' called from: " + path); path.addLast(myPath); String functionName = getName(); boolean needSerialization = (getArguments().size() > 0) || context.needSerialization(functionName); if ( needSerialization ) { ExpressionFunction.Instance instance = function.expand(context, getArguments().expressions(), path); functionName = instance.getName(); context.addFunctionSerialization(RankingExpression.propertyName(functionName), instance.getExpressionString()); for (Map.Entry<String, TensorType> argumentType : function.argumentTypes().entrySet()) context.addArgumentTypeSerialization(functionName, argumentType.getKey(), argumentType.getValue()); if (function.returnType().isPresent()) context.addFunctionTypeSerialization(functionName, function.returnType().get()); } path.removeLast(); return string.append(wrapInRankingExpression(functionName)); } return reference.toString(string, context, path, parent); }
class ReferenceNode extends CompositeNode { private final Reference reference; /* Parses this string into a reference */ public ReferenceNode(String name) { this.reference = Reference.simple(name).orElseGet(() -> Reference.fromIdentifier(name)); } public ReferenceNode(String name, List<? extends ExpressionNode> arguments, String output) { this.reference = new Reference(name, arguments != null ? new Arguments(arguments) : Arguments.EMPTY, output); } public ReferenceNode(Reference reference) { this.reference = reference; } public String getName() { return reference.name(); } /** Returns the arguments, never null */ public Arguments getArguments() { return reference.arguments(); } /** Returns a copy of this where the arguments are replaced by the given arguments */ public ReferenceNode setArguments(List<ExpressionNode> arguments) { return new ReferenceNode(reference.withArguments(new Arguments(arguments))); } /** Returns the specific output this references, or null if none specified */ public String getOutput() { return reference.output(); } /** Returns a copy of this node with a modified output */ public ReferenceNode setOutput(String output) { return new ReferenceNode(reference.withOutput(output)); } /** Returns an empty list as this has no children */ @Override public List<ExpressionNode> children() { return reference.arguments().expressions(); } @Override /** Returns the reference of this node */ public Reference reference() { return reference; } @Override public TensorType type(TypeContext<Reference> context) { TensorType type = null; try { type = context.getType(reference); } catch (IllegalArgumentException e) { throw new IllegalArgumentException(reference + " is invalid", e); } if (type == null) throw new IllegalArgumentException("Unknown feature '" + this + "'"); return type; } @Override public Value evaluate(Context context) { if (reference.isIdentifier()) return context.get(reference.name()); else return context.get(getName(), getArguments(), getOutput()); } 
@Override public CompositeNode setChildren(List<ExpressionNode> newChildren) { return setArguments(newChildren); } @Override public int hashCode() { return reference.hashCode(); } }
class ReferenceNode extends CompositeNode { private final Reference reference; /* Parses this string into a reference */ public ReferenceNode(String name) { this.reference = Reference.simple(name).orElseGet(() -> Reference.fromIdentifier(name)); } public ReferenceNode(String name, List<? extends ExpressionNode> arguments, String output) { this.reference = new Reference(name, arguments != null ? new Arguments(arguments) : Arguments.EMPTY, output); } public ReferenceNode(Reference reference) { this.reference = reference; } public String getName() { return reference.name(); } /** Returns the arguments, never null */ public Arguments getArguments() { return reference.arguments(); } /** Returns a copy of this where the arguments are replaced by the given arguments */ public ReferenceNode setArguments(List<ExpressionNode> arguments) { return new ReferenceNode(reference.withArguments(new Arguments(arguments))); } /** Returns the specific output this references, or null if none specified */ public String getOutput() { return reference.output(); } /** Returns a copy of this node with a modified output */ public ReferenceNode setOutput(String output) { return new ReferenceNode(reference.withOutput(output)); } /** Returns an empty list as this has no children */ @Override public List<ExpressionNode> children() { return reference.arguments().expressions(); } @Override /** Returns the reference of this node */ public Reference reference() { return reference; } @Override public TensorType type(TypeContext<Reference> context) { TensorType type = null; try { type = context.getType(reference); } catch (IllegalArgumentException e) { throw new IllegalArgumentException(reference + " is invalid", e); } if (type == null) throw new IllegalArgumentException("Unknown feature '" + this + "'"); return type; } @Override public Value evaluate(Context context) { if (reference.isIdentifier()) return context.get(reference.name()); else return context.get(getName(), getArguments(), getOutput()); } 
@Override public CompositeNode setChildren(List<ExpressionNode> newChildren) { return setArguments(newChildren); } @Override public int hashCode() { return reference.hashCode(); } }
Exceptions.toMessageString() will be used when serializing this, which will recursively concatenate the nested exception (preceded by ": ").
public DerivedConfiguration(Schema schema, DeployState deployState) { try { Validator.ensureNotNull("Schema", schema); this.schema = schema; this.queryProfiles = deployState.getQueryProfiles().getRegistry(); this.maxUncommittedMemory = deployState.getProperties().featureFlags().maxUnCommittedMemory(); if (!schema.isDocumentsOnly()) { streamingFields = new VsmFields(schema); streamingSummary = new VsmSummary(schema); } if (!schema.isDocumentsOnly()) { attributeFields = new AttributeFields(schema); summaries = new Summaries(schema, deployState.getDeployLogger(), deployState.getProperties().featureFlags()); juniperrc = new Juniperrc(schema); rankProfileList = new RankProfileList(schema, schema.rankExpressionFiles(), attributeFields, deployState); indexingScript = new IndexingScript(schema); indexInfo = new IndexInfo(schema); schemaInfo = new SchemaInfo(schema, deployState.rankProfileRegistry(), summaries); indexSchema = new IndexSchema(schema); importedFields = new ImportedFields(schema); } Validation.validate(this, schema); } catch (IllegalArgumentException|IllegalStateException e) { throw new IllegalArgumentException("Invalid " + schema + " -> " + e.getMessage(), e); } }
throw new IllegalArgumentException("Invalid " + schema + " -> " + e.getMessage(), e);
public DerivedConfiguration(Schema schema, DeployState deployState) { try { Validator.ensureNotNull("Schema", schema); this.schema = schema; this.queryProfiles = deployState.getQueryProfiles().getRegistry(); this.maxUncommittedMemory = deployState.getProperties().featureFlags().maxUnCommittedMemory(); if (!schema.isDocumentsOnly()) { streamingFields = new VsmFields(schema); streamingSummary = new VsmSummary(schema); } if (!schema.isDocumentsOnly()) { attributeFields = new AttributeFields(schema); summaries = new Summaries(schema, deployState.getDeployLogger(), deployState.getProperties().featureFlags()); juniperrc = new Juniperrc(schema); rankProfileList = new RankProfileList(schema, schema.rankExpressionFiles(), attributeFields, deployState); indexingScript = new IndexingScript(schema); indexInfo = new IndexInfo(schema); schemaInfo = new SchemaInfo(schema, deployState.rankProfileRegistry(), summaries); indexSchema = new IndexSchema(schema); importedFields = new ImportedFields(schema); } Validation.validate(this, schema); } catch (IllegalArgumentException|IllegalStateException e) { throw new IllegalArgumentException("Invalid " + schema, e); } }
class DerivedConfiguration implements AttributesConfig.Producer { private final Schema schema; private Summaries summaries; private Juniperrc juniperrc; private AttributeFields attributeFields; private RankProfileList rankProfileList; private IndexingScript indexingScript; private IndexInfo indexInfo; private SchemaInfo schemaInfo; private VsmFields streamingFields; private VsmSummary streamingSummary; private IndexSchema indexSchema; private ImportedFields importedFields; private final QueryProfileRegistry queryProfiles; private final long maxUncommittedMemory; /** * Creates a complete derived configuration from a search definition. * Only used in tests. * * @param schema the search to derive a configuration from. Derived objects will be snapshots, but this argument is * live. Which means that this object will be inconsistent when the given search definition is later * modified. * @param rankProfileRegistry a {@link com.yahoo.schema.RankProfileRegistry} */ public DerivedConfiguration(Schema schema, RankProfileRegistry rankProfileRegistry) { this(schema, rankProfileRegistry, new QueryProfileRegistry()); } DerivedConfiguration(Schema schema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfiles) { this(schema, new DeployState.Builder().rankProfileRegistry(rankProfileRegistry).queryProfiles(queryProfiles).build()); } /** * Creates a complete derived configuration snapshot from a schema. * * @param schema the schema to derive a configuration from. Derived objects will be snapshots, but this * argument is live. Which means that this object will be inconsistent if the given * schema is later modified. */ /** * Exports a complete set of configuration-server format config files. 
* * @param toDirectory the directory to export to, current dir if null * @throws IOException if exporting fails, some files may still be created */ public void export(String toDirectory) throws IOException { if (!schema.isDocumentsOnly()) { summaries.export(toDirectory); juniperrc.export(toDirectory); attributeFields.export(toDirectory); streamingFields.export(toDirectory); streamingSummary.export(toDirectory); indexSchema.export(toDirectory); rankProfileList.export(toDirectory); indexingScript.export(toDirectory); indexInfo.export(toDirectory); importedFields.export(toDirectory); schemaInfo.export(toDirectory); } } public static void exportDocuments(DocumentmanagerConfig.Builder documentManagerCfg, String toDirectory) throws IOException { exportCfg(new DocumentmanagerConfig(documentManagerCfg), toDirectory + "/" + "documentmanager.cfg"); } public static void exportDocuments(DocumenttypesConfig.Builder documentTypesCfg, String toDirectory) throws IOException { exportCfg(new DocumenttypesConfig(documentTypesCfg), toDirectory + "/" + "documenttypes.cfg"); } public static void exportQueryProfiles(QueryProfileRegistry queryProfileRegistry, String toDirectory) throws IOException { exportCfg(new QueryProfiles(queryProfileRegistry, (level, message) -> {}).getConfig(), toDirectory + "/" + "query-profiles.cfg"); } public void exportConstants(String toDirectory) throws IOException { RankingConstantsConfig.Builder b = new RankingConstantsConfig.Builder(); rankProfileList.getConfig(b); exportCfg(b.build(), toDirectory + "/" + "ranking-constants.cfg"); } private static void exportCfg(ConfigInstance instance, String fileName) throws IOException { Writer writer = null; try { writer = IOUtils.createWriter(fileName, false); writer.write(instance.toString()); writer.write("\n"); } finally { if (writer != null) { IOUtils.closeWriter(writer); } } } public Summaries getSummaries() { return summaries; } public AttributeFields getAttributeFields() { return attributeFields; } @Override 
public void getConfig(AttributesConfig.Builder builder) { getConfig(builder, AttributeFields.FieldSet.ALL); } public void getConfig(AttributesConfig.Builder builder, AttributeFields.FieldSet fs) { attributeFields.getConfig(builder, fs, maxUncommittedMemory); } public IndexingScript getIndexingScript() { return indexingScript; } public IndexInfo getIndexInfo() { return indexInfo; } public SchemaInfo getSchemaInfo() { return schemaInfo; } public void setIndexingScript(IndexingScript script) { this.indexingScript = script; } public Schema getSchema() { return schema; } public RankProfileList getRankProfileList() { return rankProfileList; } public VsmSummary getVsmSummary() { return streamingSummary; } public VsmFields getVsmFields() { return streamingFields; } public IndexSchema getIndexSchema() { return indexSchema; } public Juniperrc getJuniperrc() { return juniperrc; } public ImportedFields getImportedFields() { return importedFields; } public QueryProfileRegistry getQueryProfiles() { return queryProfiles; } }
class DerivedConfiguration implements AttributesConfig.Producer { private final Schema schema; private Summaries summaries; private Juniperrc juniperrc; private AttributeFields attributeFields; private RankProfileList rankProfileList; private IndexingScript indexingScript; private IndexInfo indexInfo; private SchemaInfo schemaInfo; private VsmFields streamingFields; private VsmSummary streamingSummary; private IndexSchema indexSchema; private ImportedFields importedFields; private final QueryProfileRegistry queryProfiles; private final long maxUncommittedMemory; /** * Creates a complete derived configuration from a search definition. * Only used in tests. * * @param schema the search to derive a configuration from. Derived objects will be snapshots, but this argument is * live. Which means that this object will be inconsistent when the given search definition is later * modified. * @param rankProfileRegistry a {@link com.yahoo.schema.RankProfileRegistry} */ public DerivedConfiguration(Schema schema, RankProfileRegistry rankProfileRegistry) { this(schema, rankProfileRegistry, new QueryProfileRegistry()); } DerivedConfiguration(Schema schema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfiles) { this(schema, new DeployState.Builder().rankProfileRegistry(rankProfileRegistry).queryProfiles(queryProfiles).build()); } /** * Creates a complete derived configuration snapshot from a schema. * * @param schema the schema to derive a configuration from. Derived objects will be snapshots, but this * argument is live. Which means that this object will be inconsistent if the given * schema is later modified. */ /** * Exports a complete set of configuration-server format config files. 
* * @param toDirectory the directory to export to, current dir if null * @throws IOException if exporting fails, some files may still be created */ public void export(String toDirectory) throws IOException { if (!schema.isDocumentsOnly()) { summaries.export(toDirectory); juniperrc.export(toDirectory); attributeFields.export(toDirectory); streamingFields.export(toDirectory); streamingSummary.export(toDirectory); indexSchema.export(toDirectory); rankProfileList.export(toDirectory); indexingScript.export(toDirectory); indexInfo.export(toDirectory); importedFields.export(toDirectory); schemaInfo.export(toDirectory); } } public static void exportDocuments(DocumentmanagerConfig.Builder documentManagerCfg, String toDirectory) throws IOException { exportCfg(new DocumentmanagerConfig(documentManagerCfg), toDirectory + "/" + "documentmanager.cfg"); } public static void exportDocuments(DocumenttypesConfig.Builder documentTypesCfg, String toDirectory) throws IOException { exportCfg(new DocumenttypesConfig(documentTypesCfg), toDirectory + "/" + "documenttypes.cfg"); } public static void exportQueryProfiles(QueryProfileRegistry queryProfileRegistry, String toDirectory) throws IOException { exportCfg(new QueryProfiles(queryProfileRegistry, (level, message) -> {}).getConfig(), toDirectory + "/" + "query-profiles.cfg"); } public void exportConstants(String toDirectory) throws IOException { RankingConstantsConfig.Builder b = new RankingConstantsConfig.Builder(); rankProfileList.getConfig(b); exportCfg(b.build(), toDirectory + "/" + "ranking-constants.cfg"); } private static void exportCfg(ConfigInstance instance, String fileName) throws IOException { Writer writer = null; try { writer = IOUtils.createWriter(fileName, false); writer.write(instance.toString()); writer.write("\n"); } finally { if (writer != null) { IOUtils.closeWriter(writer); } } } public Summaries getSummaries() { return summaries; } public AttributeFields getAttributeFields() { return attributeFields; } @Override 
public void getConfig(AttributesConfig.Builder builder) { getConfig(builder, AttributeFields.FieldSet.ALL); } public void getConfig(AttributesConfig.Builder builder, AttributeFields.FieldSet fs) { attributeFields.getConfig(builder, fs, maxUncommittedMemory); } public IndexingScript getIndexingScript() { return indexingScript; } public IndexInfo getIndexInfo() { return indexInfo; } public SchemaInfo getSchemaInfo() { return schemaInfo; } public void setIndexingScript(IndexingScript script) { this.indexingScript = script; } public Schema getSchema() { return schema; } public RankProfileList getRankProfileList() { return rankProfileList; } public VsmSummary getVsmSummary() { return streamingSummary; } public VsmFields getVsmFields() { return streamingFields; } public IndexSchema getIndexSchema() { return indexSchema; } public Juniperrc getJuniperrc() { return juniperrc; } public ImportedFields getImportedFields() { return importedFields; } public QueryProfileRegistry getQueryProfiles() { return queryProfiles; } }
Is the cluster ID redundant because this always happens in the context of a particular content cluster (i.e. the routing is done on "the outside") or is it later inferred from the request itself?
public SetResponse setUnitState(final SetUnitStateRequest request) throws StateRestApiException { UnitPathResolver<SetResponse> resolver = new UnitPathResolver<>(fleetControllerResolver.getFleetControllers()); Request<? extends SetResponse> req = resolver.visit(request.getUnitPath(), new UnitPathResolver.AbstractVisitor<>(request.getUnitPath(), "State can only be set at cluster or node level") { @Override public Request<? extends SetResponse> visitCluster(Id.Cluster id) { return new SetNodeStatesForClusterRequest(request); } @Override public Request<? extends SetResponse> visitNode(Id.Node id) { return new SetNodeStateRequest(id, request); } }); resolver.resolveFleetController(request.getUnitPath()).schedule(req); req.waitForCompletion(); try{ return req.getResult(); } catch (OtherMasterIndexException e) { createAndThrowOtherMasterException(e.getMasterIndex()); throw new RuntimeException("Should not get here"); } }
return new SetNodeStatesForClusterRequest(request);
public SetResponse setUnitState(final SetUnitStateRequest request) throws StateRestApiException { UnitPathResolver<SetResponse> resolver = new UnitPathResolver<>(fleetControllerResolver.getFleetControllers()); Request<? extends SetResponse> req = resolver.visit(request.getUnitPath(), new UnitPathResolver.AbstractVisitor<>(request.getUnitPath(), "State can only be set at cluster or node level") { @Override public Request<? extends SetResponse> visitCluster(Id.Cluster id) { return new SetNodeStatesForClusterRequest(request); } @Override public Request<? extends SetResponse> visitNode(Id.Node id) { return new SetNodeStateRequest(id, request); } }); resolver.resolveFleetController(request.getUnitPath()).schedule(req); req.waitForCompletion(); try{ return req.getResult(); } catch (OtherMasterIndexException e) { createAndThrowOtherMasterException(e.getMasterIndex()); throw new RuntimeException("Should not get here"); } }
class ClusterControllerStateRestAPI implements StateRestAPI { private static final Logger log = Logger.getLogger(ClusterControllerStateRestAPI.class.getName()); public interface FleetControllerResolver { Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers(); } public record Socket(String hostname, int port) { @Override public String toString() { return hostname + ":" + port; } } private final FleetControllerResolver fleetControllerResolver; private final Map<Integer, Socket> clusterControllerSockets; public ClusterControllerStateRestAPI(FleetControllerResolver resolver, Map<Integer, Socket> clusterControllerSockets) { fleetControllerResolver = resolver; this.clusterControllerSockets = clusterControllerSockets; } @Override public UnitResponse getState(final UnitStateRequest request) throws StateRestApiException { log.finest("Got getState() request"); UnitPathResolver<UnitResponse> resolver = new UnitPathResolver<>(fleetControllerResolver.getFleetControllers()); Request<? extends UnitResponse> req = resolver.visit( request.getUnitPath(), new UnitPathResolver.Visitor<>() { @Override public Request<? extends UnitResponse> visitGlobal() { return new ClusterListRequest(request.getRecursiveLevels(), fleetControllerResolver); } @Override public Request<? extends UnitResponse> visitCluster(Id.Cluster id) { return new ClusterStateRequest(id, request.getRecursiveLevels()); } @Override public Request<? extends UnitResponse> visitService(Id.Service id) { return new ServiceStateRequest(id, request.getRecursiveLevels()); } @Override public Request<? 
extends UnitResponse> visitNode(Id.Node id) { return new NodeStateRequest(id); } }); if (req instanceof ClusterListRequest) { log.fine("Got cluster list request"); req.doRemoteFleetControllerTask(null); req.notifyCompleted(); log.finest("Completed processing cluster list request"); } else { log.fine("Scheduling state request: " + req.getClass().toString()); resolver.resolveFleetController(request.getUnitPath()).schedule(req); log.finest("Scheduled state request: " + req.getClass()); req.waitForCompletion(); log.finest("Completed processing state request: " + req.getClass()); } try { return req.getResult(); } catch (OtherMasterIndexException e) { createAndThrowOtherMasterException(e.getMasterIndex()); throw new RuntimeException("Should not get here"); } } @Override private void createAndThrowOtherMasterException(int master) throws StateRestApiException { Socket s = clusterControllerSockets.get(master); if (s == null) throw new InternalFailure( "Cannot create redirect response to master at index " + master + ", as we failed to get correct config to detect running cluster controllers."); throw new OtherMasterException(s.hostname, s.port); } }
class ClusterControllerStateRestAPI implements StateRestAPI { private static final Logger log = Logger.getLogger(ClusterControllerStateRestAPI.class.getName()); public interface FleetControllerResolver { Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers(); } public record Socket(String hostname, int port) { @Override public String toString() { return hostname + ":" + port; } } private final FleetControllerResolver fleetControllerResolver; private final Map<Integer, Socket> clusterControllerSockets; public ClusterControllerStateRestAPI(FleetControllerResolver resolver, Map<Integer, Socket> clusterControllerSockets) { fleetControllerResolver = resolver; this.clusterControllerSockets = clusterControllerSockets; } @Override public UnitResponse getState(final UnitStateRequest request) throws StateRestApiException { log.finest("Got getState() request"); UnitPathResolver<UnitResponse> resolver = new UnitPathResolver<>(fleetControllerResolver.getFleetControllers()); Request<? extends UnitResponse> req = resolver.visit( request.getUnitPath(), new UnitPathResolver.Visitor<>() { @Override public Request<? extends UnitResponse> visitGlobal() { return new ClusterListRequest(request.getRecursiveLevels(), fleetControllerResolver); } @Override public Request<? extends UnitResponse> visitCluster(Id.Cluster id) { return new ClusterStateRequest(id, request.getRecursiveLevels()); } @Override public Request<? extends UnitResponse> visitService(Id.Service id) { return new ServiceStateRequest(id, request.getRecursiveLevels()); } @Override public Request<? 
extends UnitResponse> visitNode(Id.Node id) { return new NodeStateRequest(id); } }); if (req instanceof ClusterListRequest) { log.fine("Got cluster list request"); req.doRemoteFleetControllerTask(null); req.notifyCompleted(); log.finest("Completed processing cluster list request"); } else { log.fine("Scheduling state request: " + req.getClass().toString()); resolver.resolveFleetController(request.getUnitPath()).schedule(req); log.finest("Scheduled state request: " + req.getClass()); req.waitForCompletion(); log.finest("Completed processing state request: " + req.getClass()); } try { return req.getResult(); } catch (OtherMasterIndexException e) { createAndThrowOtherMasterException(e.getMasterIndex()); throw new RuntimeException("Should not get here"); } } @Override private void createAndThrowOtherMasterException(int master) throws StateRestApiException { Socket s = clusterControllerSockets.get(master); if (s == null) throw new InternalFailure( "Cannot create redirect response to master at index " + master + ", as we failed to get correct config to detect running cluster controllers."); throw new OtherMasterException(s.hostname, s.port); } }
Yes, the context decides the cluster
public SetResponse setUnitState(final SetUnitStateRequest request) throws StateRestApiException { UnitPathResolver<SetResponse> resolver = new UnitPathResolver<>(fleetControllerResolver.getFleetControllers()); Request<? extends SetResponse> req = resolver.visit(request.getUnitPath(), new UnitPathResolver.AbstractVisitor<>(request.getUnitPath(), "State can only be set at cluster or node level") { @Override public Request<? extends SetResponse> visitCluster(Id.Cluster id) { return new SetNodeStatesForClusterRequest(request); } @Override public Request<? extends SetResponse> visitNode(Id.Node id) { return new SetNodeStateRequest(id, request); } }); resolver.resolveFleetController(request.getUnitPath()).schedule(req); req.waitForCompletion(); try{ return req.getResult(); } catch (OtherMasterIndexException e) { createAndThrowOtherMasterException(e.getMasterIndex()); throw new RuntimeException("Should not get here"); } }
return new SetNodeStatesForClusterRequest(request);
public SetResponse setUnitState(final SetUnitStateRequest request) throws StateRestApiException { UnitPathResolver<SetResponse> resolver = new UnitPathResolver<>(fleetControllerResolver.getFleetControllers()); Request<? extends SetResponse> req = resolver.visit(request.getUnitPath(), new UnitPathResolver.AbstractVisitor<>(request.getUnitPath(), "State can only be set at cluster or node level") { @Override public Request<? extends SetResponse> visitCluster(Id.Cluster id) { return new SetNodeStatesForClusterRequest(request); } @Override public Request<? extends SetResponse> visitNode(Id.Node id) { return new SetNodeStateRequest(id, request); } }); resolver.resolveFleetController(request.getUnitPath()).schedule(req); req.waitForCompletion(); try{ return req.getResult(); } catch (OtherMasterIndexException e) { createAndThrowOtherMasterException(e.getMasterIndex()); throw new RuntimeException("Should not get here"); } }
class ClusterControllerStateRestAPI implements StateRestAPI { private static final Logger log = Logger.getLogger(ClusterControllerStateRestAPI.class.getName()); public interface FleetControllerResolver { Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers(); } public record Socket(String hostname, int port) { @Override public String toString() { return hostname + ":" + port; } } private final FleetControllerResolver fleetControllerResolver; private final Map<Integer, Socket> clusterControllerSockets; public ClusterControllerStateRestAPI(FleetControllerResolver resolver, Map<Integer, Socket> clusterControllerSockets) { fleetControllerResolver = resolver; this.clusterControllerSockets = clusterControllerSockets; } @Override public UnitResponse getState(final UnitStateRequest request) throws StateRestApiException { log.finest("Got getState() request"); UnitPathResolver<UnitResponse> resolver = new UnitPathResolver<>(fleetControllerResolver.getFleetControllers()); Request<? extends UnitResponse> req = resolver.visit( request.getUnitPath(), new UnitPathResolver.Visitor<>() { @Override public Request<? extends UnitResponse> visitGlobal() { return new ClusterListRequest(request.getRecursiveLevels(), fleetControllerResolver); } @Override public Request<? extends UnitResponse> visitCluster(Id.Cluster id) { return new ClusterStateRequest(id, request.getRecursiveLevels()); } @Override public Request<? extends UnitResponse> visitService(Id.Service id) { return new ServiceStateRequest(id, request.getRecursiveLevels()); } @Override public Request<? 
extends UnitResponse> visitNode(Id.Node id) { return new NodeStateRequest(id); } }); if (req instanceof ClusterListRequest) { log.fine("Got cluster list request"); req.doRemoteFleetControllerTask(null); req.notifyCompleted(); log.finest("Completed processing cluster list request"); } else { log.fine("Scheduling state request: " + req.getClass().toString()); resolver.resolveFleetController(request.getUnitPath()).schedule(req); log.finest("Scheduled state request: " + req.getClass()); req.waitForCompletion(); log.finest("Completed processing state request: " + req.getClass()); } try { return req.getResult(); } catch (OtherMasterIndexException e) { createAndThrowOtherMasterException(e.getMasterIndex()); throw new RuntimeException("Should not get here"); } } @Override private void createAndThrowOtherMasterException(int master) throws StateRestApiException { Socket s = clusterControllerSockets.get(master); if (s == null) throw new InternalFailure( "Cannot create redirect response to master at index " + master + ", as we failed to get correct config to detect running cluster controllers."); throw new OtherMasterException(s.hostname, s.port); } }
class ClusterControllerStateRestAPI implements StateRestAPI { private static final Logger log = Logger.getLogger(ClusterControllerStateRestAPI.class.getName()); public interface FleetControllerResolver { Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers(); } public record Socket(String hostname, int port) { @Override public String toString() { return hostname + ":" + port; } } private final FleetControllerResolver fleetControllerResolver; private final Map<Integer, Socket> clusterControllerSockets; public ClusterControllerStateRestAPI(FleetControllerResolver resolver, Map<Integer, Socket> clusterControllerSockets) { fleetControllerResolver = resolver; this.clusterControllerSockets = clusterControllerSockets; } @Override public UnitResponse getState(final UnitStateRequest request) throws StateRestApiException { log.finest("Got getState() request"); UnitPathResolver<UnitResponse> resolver = new UnitPathResolver<>(fleetControllerResolver.getFleetControllers()); Request<? extends UnitResponse> req = resolver.visit( request.getUnitPath(), new UnitPathResolver.Visitor<>() { @Override public Request<? extends UnitResponse> visitGlobal() { return new ClusterListRequest(request.getRecursiveLevels(), fleetControllerResolver); } @Override public Request<? extends UnitResponse> visitCluster(Id.Cluster id) { return new ClusterStateRequest(id, request.getRecursiveLevels()); } @Override public Request<? extends UnitResponse> visitService(Id.Service id) { return new ServiceStateRequest(id, request.getRecursiveLevels()); } @Override public Request<? 
extends UnitResponse> visitNode(Id.Node id) { return new NodeStateRequest(id); } }); if (req instanceof ClusterListRequest) { log.fine("Got cluster list request"); req.doRemoteFleetControllerTask(null); req.notifyCompleted(); log.finest("Completed processing cluster list request"); } else { log.fine("Scheduling state request: " + req.getClass().toString()); resolver.resolveFleetController(request.getUnitPath()).schedule(req); log.finest("Scheduled state request: " + req.getClass()); req.waitForCompletion(); log.finest("Completed processing state request: " + req.getClass()); } try { return req.getResult(); } catch (OtherMasterIndexException e) { createAndThrowOtherMasterException(e.getMasterIndex()); throw new RuntimeException("Should not get here"); } } @Override private void createAndThrowOtherMasterException(int master) throws StateRestApiException { Socket s = clusterControllerSockets.get(master); if (s == null) throw new InternalFailure( "Cannot create redirect response to master at index " + master + ", as we failed to get correct config to detect running cluster controllers."); throw new OtherMasterException(s.hostname, s.port); } }
This should probably read "down" rather than "in maintenance"
void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(DISTRIBUTOR, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); }
void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(DISTRIBUTOR, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); }
class NodeStateChangeCheckerTest { private static final int requiredRedundancy = 4; private static final int currentClusterStateVersion = 2; private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1); private static final Node nodeStorage = new Node(STORAGE, 1); private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP); private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator"); private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer"); private static NodeState createNodeState(State state, String description) { return new NodeState(STORAGE, state).setDescription(description); } private static ClusterState clusterState(String state) { try { return new ClusterState(state); } catch (ParseException e) { throw new RuntimeException(e); } } private static ClusterState defaultAllUpClusterState() { return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion)); } private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) { return new NodeStateChangeChecker(cluster, false); } private ContentCluster createCluster(int nodeCount) { return createCluster(nodeCount, 1); } private ContentCluster createCluster(int nodeCount, int groupCount) { Collection<ConfiguredNode> nodes = createNodes(nodeCount); Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount)); return new ContentCluster("Clustername", nodes, distribution); } private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) { return "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + replicationfactor1 + "\n" + " },\n" + " {\n" + " \"node-index\": 1,\n" + " \"min-current-replication-factor\": " + replicationfactor2 + "\n" + " },\n" + " {\n" + " 
\"node-index\": 2,\n" + " \"min-current-replication-factor\": " + replicationfactor3 + "\n" + " },\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]\n" + " }\n" + "}\n"; } private void markAllNodesAsReportingStateUp(ContentCluster cluster) { final ClusterInfo clusterInfo = cluster.clusterInfo(); final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size(); for (int i = 0; i < configuredNodeCount; i++) { clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0); } } @Test void testCanUpgradeForce() { var nodeStateChangeChecker = createChangeChecker(createCluster(1)); NodeState newState = new NodeState(STORAGE, INITIALIZING); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), FORCE, UP_NODE_STATE, newState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDeniedInMoratorium() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason()); } @Test void testUnknownStorageNode() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); 
assertFalse(result.wantedStateAlreadySet()); assertEquals("Unknown node storage.10", result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test @Test void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(STORAGE, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other distributor 0 in group 0 has wanted state Down", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), 
result.getReason()); assertEquals("Another distributor wants state DOWN: 0", result.getReason()); } } @Test void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), result.getReason()); assertFalse(result.wantedStateAlreadySet()); } } @Test void testSafeSetStateDistributors() { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1)); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes")); } @Test void testCanUpgradeSafeMissingStorage() { ContentCluster cluster = createCluster(4); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, 
DOWN), 0); ClusterState clusterStateWith3Down = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterStateWith3Down, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Another storage node has state DOWN: 3", result.getReason()); } @Test void testCanUpgradeStorageSafeYes() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpFailsIfReportedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .%d.s:d", currentClusterStateVersion, nodeStorage.getIndex())); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanSetUpEvenIfOldWantedStateIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); 
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, new NodeState(STORAGE, DOWN), UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeStorageSafeNo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4", result.getReason()); } @Test void testCanUpgradeIfMissingMinReplicationFactor() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeIfStorageNodeMissingFromNodeInfo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); String hostInfo = "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + requiredRedundancy + "\n" + " }\n" + " ]\n" + " }\n" + "}\n"; setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo)); Result result = 
nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testMissingDistributorState() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason()); } private Result transitionToSameState(State state, String oldDescription, String newDescription) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); NodeState currentNodeState = createNodeState(state, oldDescription); NodeState newNodeState = createNodeState(state, newDescription); return nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, currentNodeState, newNodeState); } private Result transitionToSameState(String oldDescription, String newDescription) { return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription); } @Test void testSettingUpWhenUpCausesAlreadySet() { Result result = transitionToSameState(UP, "foo", "bar"); assertTrue(result.wantedStateAlreadySet()); } @Test void testSettingAlreadySetState() { Result result = transitionToSameState("foo", "foo"); assertFalse(result.settingWantedStateIsAllowed()); assertTrue(result.wantedStateAlreadySet()); } @Test void testDifferentDescriptionImpliesDenied() { Result result = transitionToSameState("foo", "bar"); assertFalse(result.settingWantedStateIsAllowed()); 
assertFalse(result.wantedStateAlreadySet()); } private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); } private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) { for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } } private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) { return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); } @Test void testCanUpgradeWhenAllUp() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenAllUpOrRetired() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenStorageIsDown() { 
ClusterState clusterState = defaultAllUpClusterState(); var storageNodeIndex = nodeStorage.getIndex(); ContentCluster cluster = createCluster(4); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCannotUpgradeWhenOtherStorageIsDown() { int otherIndex = 2; assertNotEquals(nodeStorage.getIndex(), otherIndex); ContentCluster cluster = createCluster(4); ClusterState clusterState = defaultAllUpClusterState(); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Another storage node has state DOWN: 2")); } @Test void testNodeRatioRequirementConsidersGeneratedNodeStates() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDownDisallowedByNonRetiredState() { Result result = evaluateDownTransition( 
defaultAllUpClusterState(), UP, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason()); } @Test void testDownDisallowedByBuckets() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("The storage node manages 1 buckets", result.getReason()); } @Test void testDownDisallowedByReportedState() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason()); } @Test void testDownDisallowedByVersionMismatch() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1", result.getReason()); } @Test void testAllowedToSetDown() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result evaluateDownTransition(ClusterState clusterState, State reportedState, int hostInfoClusterStateVersion, int lastAlldisksBuckets) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex()); nodeInfo.setReportedState(new NodeState(STORAGE, 
reportedState), 0); nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets)); return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE); } private ClusterState retiredClusterStateSuffix() { return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r", currentClusterStateVersion, nodeStorage.getIndex())); } private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) { return HostInfo.createHostInfo(String.format("{\n" + " \"metrics\":\n" + " {\n" + " \"snapshot\":\n" + " {\n" + " \"from\":1494940706,\n" + " \"to\":1494940766\n" + " },\n" + " \"values\":\n" + " [\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.buckets\",\n" + " \"description\":\"buckets managed\",\n" + " \"values\":\n" + " {\n" + " \"average\":262144.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":262144,\n" + " \"max\":262144,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.docs\",\n" + " \"description\":\"documents stored\",\n" + " \"values\":\n" + " {\n" + " \"average\":154689587.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":154689587,\n" + " \"max\":154689587,\n" + " \"last\":154689587\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":0.0,\n" + " \"sum\":0.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":0,\n" + " \"max\":0,\n" + " \"last\":0\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"global\"\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + 
not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":129.0,\n" + " \"sum\":129.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":129,\n" + " \"max\":129,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"default\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"cluster-state-version\":%d\n" + "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion)); } private List<ConfiguredNode> createNodes(int count) { List<ConfiguredNode> nodes = new ArrayList<>(); for (int i = 0; i < count; i++) nodes.add(new ConfiguredNode(i, false)); return nodes; } private StorDistributionConfig createDistributionConfig(int nodes) { var configBuilder = new StorDistributionConfig.Builder() .ready_copies(requiredRedundancy) .redundancy(requiredRedundancy) .initial_redundancy(requiredRedundancy); var groupBuilder = new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes); int nodeIndex = 0; for (int j = 0; j < nodes; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); return configBuilder.build(); } private StorDistributionConfig createDistributionConfig(int nodes, int groups) { if (groups == 1) return createDistributionConfig(nodes); if (nodes % groups != 0) throw new IllegalArgumentException("Cannot have " + groups + " groups with an odd number of nodes: " + nodes); int nodesPerGroup = nodes / groups; var configBuilder = new StorDistributionConfig.Builder() .active_per_leaf_group(true) .ready_copies(2) .redundancy(2) .initial_redundancy(2); configBuilder.group(new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes) .partitions("1|*")); int nodeIndex = 0; for (int i = 0; i < groups; ++i) { var groupBuilder = new StorDistributionConfig.Group.Builder() .index(String.valueOf(i)) .name(String.valueOf(i)) .capacity(nodesPerGroup) .partitions(""); for (int j = 0; j 
< nodesPerGroup; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); } return configBuilder.build(); } }
class NodeStateChangeCheckerTest { private static final int requiredRedundancy = 4; private static final int currentClusterStateVersion = 2; private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1); private static final Node nodeStorage = new Node(STORAGE, 1); private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP); private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator"); private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer"); private static NodeState createNodeState(State state, String description) { return new NodeState(STORAGE, state).setDescription(description); } private static ClusterState clusterState(String state) { try { return new ClusterState(state); } catch (ParseException e) { throw new RuntimeException(e); } } private static ClusterState defaultAllUpClusterState() { return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion)); } private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) { return new NodeStateChangeChecker(cluster, false); } private ContentCluster createCluster(int nodeCount) { return createCluster(nodeCount, 1); } private ContentCluster createCluster(int nodeCount, int groupCount) { Collection<ConfiguredNode> nodes = createNodes(nodeCount); Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount)); return new ContentCluster("Clustername", nodes, distribution); } private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) { return "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + replicationfactor1 + "\n" + " },\n" + " {\n" + " \"node-index\": 1,\n" + " \"min-current-replication-factor\": " + replicationfactor2 + "\n" + " },\n" + " {\n" + " 
\"node-index\": 2,\n" + " \"min-current-replication-factor\": " + replicationfactor3 + "\n" + " },\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]\n" + " }\n" + "}\n"; } private void markAllNodesAsReportingStateUp(ContentCluster cluster) { final ClusterInfo clusterInfo = cluster.clusterInfo(); final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size(); for (int i = 0; i < configuredNodeCount; i++) { clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0); } } @Test void testCanUpgradeForce() { var nodeStateChangeChecker = createChangeChecker(createCluster(1)); NodeState newState = new NodeState(STORAGE, INITIALIZING); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), FORCE, UP_NODE_STATE, newState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDeniedInMoratorium() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason()); } @Test void testUnknownStorageNode() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); 
assertFalse(result.wantedStateAlreadySet()); assertEquals("Unknown node storage.10", result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test @Test void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(STORAGE, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other distributor 0 in group 0 has wanted state Down", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), 
result.getReason()); assertEquals("Another distributor wants state DOWN: 0", result.getReason()); } } @Test void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), result.getReason()); assertFalse(result.wantedStateAlreadySet()); } } @Test void testSafeSetStateDistributors() { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1)); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes")); } @Test void testCanUpgradeSafeMissingStorage() { ContentCluster cluster = createCluster(4); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, 
DOWN), 0); ClusterState clusterStateWith3Down = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterStateWith3Down, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Another storage node has state DOWN: 3", result.getReason()); } @Test void testCanUpgradeStorageSafeYes() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpFailsIfReportedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .%d.s:d", currentClusterStateVersion, nodeStorage.getIndex())); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanSetUpEvenIfOldWantedStateIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); 
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, new NodeState(STORAGE, DOWN), UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeStorageSafeNo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4", result.getReason()); } @Test void testCanUpgradeIfMissingMinReplicationFactor() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeIfStorageNodeMissingFromNodeInfo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); String hostInfo = "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + requiredRedundancy + "\n" + " }\n" + " ]\n" + " }\n" + "}\n"; setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo)); Result result = 
nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testMissingDistributorState() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason()); } private Result transitionToSameState(State state, String oldDescription, String newDescription) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); NodeState currentNodeState = createNodeState(state, oldDescription); NodeState newNodeState = createNodeState(state, newDescription); return nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, currentNodeState, newNodeState); } private Result transitionToSameState(String oldDescription, String newDescription) { return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription); } @Test void testSettingUpWhenUpCausesAlreadySet() { Result result = transitionToSameState(UP, "foo", "bar"); assertTrue(result.wantedStateAlreadySet()); } @Test void testSettingAlreadySetState() { Result result = transitionToSameState("foo", "foo"); assertFalse(result.settingWantedStateIsAllowed()); assertTrue(result.wantedStateAlreadySet()); } @Test void testDifferentDescriptionImpliesDenied() { Result result = transitionToSameState("foo", "bar"); assertFalse(result.settingWantedStateIsAllowed()); 
assertFalse(result.wantedStateAlreadySet()); } private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); } private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) { for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } } private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) { return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); } @Test void testCanUpgradeWhenAllUp() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenAllUpOrRetired() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenStorageIsDown() { 
ClusterState clusterState = defaultAllUpClusterState(); var storageNodeIndex = nodeStorage.getIndex(); ContentCluster cluster = createCluster(4); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCannotUpgradeWhenOtherStorageIsDown() { int otherIndex = 2; assertNotEquals(nodeStorage.getIndex(), otherIndex); ContentCluster cluster = createCluster(4); ClusterState clusterState = defaultAllUpClusterState(); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Another storage node has state DOWN: 2")); } @Test void testNodeRatioRequirementConsidersGeneratedNodeStates() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDownDisallowedByNonRetiredState() { Result result = evaluateDownTransition( 
defaultAllUpClusterState(), UP, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason()); } @Test void testDownDisallowedByBuckets() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("The storage node manages 1 buckets", result.getReason()); } @Test void testDownDisallowedByReportedState() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason()); } @Test void testDownDisallowedByVersionMismatch() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1", result.getReason()); } @Test void testAllowedToSetDown() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result evaluateDownTransition(ClusterState clusterState, State reportedState, int hostInfoClusterStateVersion, int lastAlldisksBuckets) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex()); nodeInfo.setReportedState(new NodeState(STORAGE, 
reportedState), 0); nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets)); return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE); } private ClusterState retiredClusterStateSuffix() { return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r", currentClusterStateVersion, nodeStorage.getIndex())); } private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) { return HostInfo.createHostInfo(String.format("{\n" + " \"metrics\":\n" + " {\n" + " \"snapshot\":\n" + " {\n" + " \"from\":1494940706,\n" + " \"to\":1494940766\n" + " },\n" + " \"values\":\n" + " [\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.buckets\",\n" + " \"description\":\"buckets managed\",\n" + " \"values\":\n" + " {\n" + " \"average\":262144.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":262144,\n" + " \"max\":262144,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.docs\",\n" + " \"description\":\"documents stored\",\n" + " \"values\":\n" + " {\n" + " \"average\":154689587.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":154689587,\n" + " \"max\":154689587,\n" + " \"last\":154689587\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":0.0,\n" + " \"sum\":0.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":0,\n" + " \"max\":0,\n" + " \"last\":0\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"global\"\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + 
not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":129.0,\n" + " \"sum\":129.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":129,\n" + " \"max\":129,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"default\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"cluster-state-version\":%d\n" + "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion)); } private List<ConfiguredNode> createNodes(int count) { List<ConfiguredNode> nodes = new ArrayList<>(); for (int i = 0; i < count; i++) nodes.add(new ConfiguredNode(i, false)); return nodes; } private StorDistributionConfig createDistributionConfig(int nodes) { var configBuilder = new StorDistributionConfig.Builder() .ready_copies(requiredRedundancy) .redundancy(requiredRedundancy) .initial_redundancy(requiredRedundancy); var groupBuilder = new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes); int nodeIndex = 0; for (int j = 0; j < nodes; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); return configBuilder.build(); } private StorDistributionConfig createDistributionConfig(int nodes, int groups) { if (groups == 1) return createDistributionConfig(nodes); if (nodes % groups != 0) throw new IllegalArgumentException("Cannot have " + groups + " groups with an odd number of nodes: " + nodes); int nodesPerGroup = nodes / groups; var configBuilder = new StorDistributionConfig.Builder() .active_per_leaf_group(true) .ready_copies(groups) .redundancy(groups) .initial_redundancy(groups); configBuilder.group(new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes) .partitions("1|*")); for (int i = 0; i < groups; ++i) { var groupBuilder = new StorDistributionConfig.Group.Builder() .index(String.valueOf(i)) .name(String.valueOf(i)) .capacity(nodesPerGroup) .partitions(""); for (int nodeIndex = 
0; nodeIndex < nodesPerGroup; ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); } return configBuilder.build(); } }
For this particular loop I think it would make sense to collapse `nodeIndex` and `j` into a single `nodeIndex` loop variable, since both start at zero and are incremented in lock-step.
/**
 * Builds a flat (single-group) distribution config for {@code nodes} storage nodes,
 * using the test's required redundancy for ready copies, redundancy and initial redundancy.
 *
 * @param nodes number of nodes to place in the single ("invalid") group
 * @return the built {@link StorDistributionConfig}
 */
private StorDistributionConfig createDistributionConfig(int nodes) {
    var configBuilder = new StorDistributionConfig.Builder()
            .ready_copies(requiredRedundancy)
            .redundancy(requiredRedundancy)
            .initial_redundancy(requiredRedundancy);

    var groupBuilder = new StorDistributionConfig.Group.Builder()
            .index("invalid")
            .name("invalid")
            .capacity(nodes);

    // In the flat case the node index simply runs 0..nodes-1, so the former
    // lock-step pair of counters ('j' and 'nodeIndex') is collapsed into one.
    for (int nodeIndex = 0; nodeIndex < nodes; ++nodeIndex) {
        groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder()
                .index(nodeIndex));
    }
    configBuilder.group(groupBuilder);

    return configBuilder.build();
}
// Single loop variable replaces the redundant lock-step pair ('j' and 'nodeIndex'):
// both previously started at 0 and were incremented together, so one suffices.
for (int nodeIndex = 0; nodeIndex < nodes; ++nodeIndex) {
/**
 * Creates a single-group ("flat") distribution config with {@code nodes} storage nodes.
 * Ready copies, redundancy and initial redundancy are all set to the test-wide
 * required redundancy.
 *
 * @param nodes number of nodes placed in the one group
 * @return the built {@link StorDistributionConfig}
 */
private StorDistributionConfig createDistributionConfig(int nodes) {
    var configBuilder = new StorDistributionConfig.Builder()
            .ready_copies(requiredRedundancy)
            .redundancy(requiredRedundancy)
            .initial_redundancy(requiredRedundancy);

    var groupBuilder = new StorDistributionConfig.Group.Builder()
            .index("invalid")
            .name("invalid")
            .capacity(nodes);

    // Node indices run 0..nodes-1 in the flat layout; the previous version kept
    // a second counter ('j') incremented in lock-step with 'nodeIndex' — collapsed here.
    for (int nodeIndex = 0; nodeIndex < nodes; ++nodeIndex) {
        groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder()
                .index(nodeIndex));
    }
    configBuilder.group(groupBuilder);

    return configBuilder.build();
}
class NodeStateChangeCheckerTest { private static final int requiredRedundancy = 4; private static final int currentClusterStateVersion = 2; private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1); private static final Node nodeStorage = new Node(STORAGE, 1); private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP); private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator"); private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer"); private static NodeState createNodeState(State state, String description) { return new NodeState(STORAGE, state).setDescription(description); } private static ClusterState clusterState(String state) { try { return new ClusterState(state); } catch (ParseException e) { throw new RuntimeException(e); } } private static ClusterState defaultAllUpClusterState() { return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion)); } private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) { return new NodeStateChangeChecker(cluster, false); } private ContentCluster createCluster(int nodeCount) { return createCluster(nodeCount, 1); } private ContentCluster createCluster(int nodeCount, int groupCount) { Collection<ConfiguredNode> nodes = createNodes(nodeCount); Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount)); return new ContentCluster("Clustername", nodes, distribution); } private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) { return "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + replicationfactor1 + "\n" + " },\n" + " {\n" + " \"node-index\": 1,\n" + " \"min-current-replication-factor\": " + replicationfactor2 + "\n" + " },\n" + " {\n" + " 
\"node-index\": 2,\n" + " \"min-current-replication-factor\": " + replicationfactor3 + "\n" + " },\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]\n" + " }\n" + "}\n"; } private void markAllNodesAsReportingStateUp(ContentCluster cluster) { final ClusterInfo clusterInfo = cluster.clusterInfo(); final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size(); for (int i = 0; i < configuredNodeCount; i++) { clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0); } } @Test void testCanUpgradeForce() { var nodeStateChangeChecker = createChangeChecker(createCluster(1)); NodeState newState = new NodeState(STORAGE, INITIALIZING); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), FORCE, UP_NODE_STATE, newState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDeniedInMoratorium() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason()); } @Test void testUnknownStorageNode() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); 
assertFalse(result.wantedStateAlreadySet()); assertEquals("Unknown node storage.10", result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(DISTRIBUTOR, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(STORAGE, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker 
= new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other distributor 0 in group 0 has wanted state Down", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.getReason()); assertEquals("Another distributor wants state DOWN: 0", result.getReason()); } } @Test void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), 
result.getReason()); assertFalse(result.wantedStateAlreadySet()); } } @Test void testSafeSetStateDistributors() { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1)); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes")); } @Test void testCanUpgradeSafeMissingStorage() { ContentCluster cluster = createCluster(4); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, DOWN), 0); ClusterState clusterStateWith3Down = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterStateWith3Down, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Another storage node has state DOWN: 3", result.getReason()); } @Test void testCanUpgradeStorageSafeYes() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpFailsIfReportedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } 
@Test void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .%d.s:d", currentClusterStateVersion, nodeStorage.getIndex())); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanSetUpEvenIfOldWantedStateIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, new NodeState(STORAGE, DOWN), UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeStorageSafeNo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4", result.getReason()); } @Test void testCanUpgradeIfMissingMinReplicationFactor() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, 
HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeIfStorageNodeMissingFromNodeInfo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); String hostInfo = "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + requiredRedundancy + "\n" + " }\n" + " ]\n" + " }\n" + "}\n"; setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testMissingDistributorState() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason()); } private Result transitionToSameState(State state, String oldDescription, String newDescription) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); NodeState currentNodeState = createNodeState(state, oldDescription); NodeState newNodeState = 
createNodeState(state, newDescription); return nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, currentNodeState, newNodeState); } private Result transitionToSameState(String oldDescription, String newDescription) { return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription); } @Test void testSettingUpWhenUpCausesAlreadySet() { Result result = transitionToSameState(UP, "foo", "bar"); assertTrue(result.wantedStateAlreadySet()); } @Test void testSettingAlreadySetState() { Result result = transitionToSameState("foo", "foo"); assertFalse(result.settingWantedStateIsAllowed()); assertTrue(result.wantedStateAlreadySet()); } @Test void testDifferentDescriptionImpliesDenied() { Result result = transitionToSameState("foo", "bar"); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); } private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) { for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo); 
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } } private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) { return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); } @Test void testCanUpgradeWhenAllUp() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenAllUpOrRetired() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenStorageIsDown() { ClusterState clusterState = defaultAllUpClusterState(); var storageNodeIndex = nodeStorage.getIndex(); ContentCluster cluster = createCluster(4); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCannotUpgradeWhenOtherStorageIsDown() { int otherIndex = 2; assertNotEquals(nodeStorage.getIndex(), otherIndex); ContentCluster cluster = createCluster(4); ClusterState clusterState = defaultAllUpClusterState(); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); 
assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Another storage node has state DOWN: 2")); } @Test void testNodeRatioRequirementConsidersGeneratedNodeStates() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDownDisallowedByNonRetiredState() { Result result = evaluateDownTransition( defaultAllUpClusterState(), UP, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason()); } @Test void testDownDisallowedByBuckets() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("The storage node manages 1 buckets", result.getReason()); } @Test void testDownDisallowedByReportedState() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason()); } @Test void testDownDisallowedByVersionMismatch() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0); 
assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1", result.getReason()); } @Test void testAllowedToSetDown() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result evaluateDownTransition(ClusterState clusterState, State reportedState, int hostInfoClusterStateVersion, int lastAlldisksBuckets) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex()); nodeInfo.setReportedState(new NodeState(STORAGE, reportedState), 0); nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets)); return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE); } private ClusterState retiredClusterStateSuffix() { return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r", currentClusterStateVersion, nodeStorage.getIndex())); } private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) { return HostInfo.createHostInfo(String.format("{\n" + " \"metrics\":\n" + " {\n" + " \"snapshot\":\n" + " {\n" + " \"from\":1494940706,\n" + " \"to\":1494940766\n" + " },\n" + " \"values\":\n" + " [\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.buckets\",\n" + " \"description\":\"buckets managed\",\n" + " \"values\":\n" + " {\n" + " \"average\":262144.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":262144,\n" + " \"max\":262144,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.docs\",\n" + " 
\"description\":\"documents stored\",\n" + " \"values\":\n" + " {\n" + " \"average\":154689587.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":154689587,\n" + " \"max\":154689587,\n" + " \"last\":154689587\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":0.0,\n" + " \"sum\":0.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":0,\n" + " \"max\":0,\n" + " \"last\":0\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"global\"\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":129.0,\n" + " \"sum\":129.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":129,\n" + " \"max\":129,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"default\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"cluster-state-version\":%d\n" + "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion)); } private List<ConfiguredNode> createNodes(int count) { List<ConfiguredNode> nodes = new ArrayList<>(); for (int i = 0; i < count; i++) nodes.add(new ConfiguredNode(i, false)); return nodes; } private StorDistributionConfig createDistributionConfig(int nodes, int groups) { if (groups == 1) return createDistributionConfig(nodes); if (nodes % groups != 0) throw new IllegalArgumentException("Cannot have " + groups + " groups with an odd number of nodes: " + nodes); int nodesPerGroup = nodes / groups; var configBuilder = new StorDistributionConfig.Builder() .active_per_leaf_group(true) .ready_copies(2) .redundancy(2) .initial_redundancy(2); configBuilder.group(new 
StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes) .partitions("1|*")); int nodeIndex = 0; for (int i = 0; i < groups; ++i) { var groupBuilder = new StorDistributionConfig.Group.Builder() .index(String.valueOf(i)) .name(String.valueOf(i)) .capacity(nodesPerGroup) .partitions(""); for (int j = 0; j < nodesPerGroup; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); } return configBuilder.build(); } }
class NodeStateChangeCheckerTest { private static final int requiredRedundancy = 4; private static final int currentClusterStateVersion = 2; private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1); private static final Node nodeStorage = new Node(STORAGE, 1); private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP); private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator"); private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer"); private static NodeState createNodeState(State state, String description) { return new NodeState(STORAGE, state).setDescription(description); } private static ClusterState clusterState(String state) { try { return new ClusterState(state); } catch (ParseException e) { throw new RuntimeException(e); } } private static ClusterState defaultAllUpClusterState() { return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion)); } private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) { return new NodeStateChangeChecker(cluster, false); } private ContentCluster createCluster(int nodeCount) { return createCluster(nodeCount, 1); } private ContentCluster createCluster(int nodeCount, int groupCount) { Collection<ConfiguredNode> nodes = createNodes(nodeCount); Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount)); return new ContentCluster("Clustername", nodes, distribution); } private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) { return "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + replicationfactor1 + "\n" + " },\n" + " {\n" + " \"node-index\": 1,\n" + " \"min-current-replication-factor\": " + replicationfactor2 + "\n" + " },\n" + " {\n" + " 
\"node-index\": 2,\n" + " \"min-current-replication-factor\": " + replicationfactor3 + "\n" + " },\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]\n" + " }\n" + "}\n"; } private void markAllNodesAsReportingStateUp(ContentCluster cluster) { final ClusterInfo clusterInfo = cluster.clusterInfo(); final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size(); for (int i = 0; i < configuredNodeCount; i++) { clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0); } } @Test void testCanUpgradeForce() { var nodeStateChangeChecker = createChangeChecker(createCluster(1)); NodeState newState = new NodeState(STORAGE, INITIALIZING); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), FORCE, UP_NODE_STATE, newState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDeniedInMoratorium() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason()); } @Test void testUnknownStorageNode() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); 
assertFalse(result.wantedStateAlreadySet()); assertEquals("Unknown node storage.10", result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(DISTRIBUTOR, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(STORAGE, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker 
= new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other distributor 0 in group 0 has wanted state Down", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.getReason()); assertEquals("Another distributor wants state DOWN: 0", result.getReason()); } } @Test void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), 
result.getReason()); assertFalse(result.wantedStateAlreadySet()); } } @Test void testSafeSetStateDistributors() { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1)); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes")); } @Test void testCanUpgradeSafeMissingStorage() { ContentCluster cluster = createCluster(4); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, DOWN), 0); ClusterState clusterStateWith3Down = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterStateWith3Down, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Another storage node has state DOWN: 3", result.getReason()); } @Test void testCanUpgradeStorageSafeYes() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpFailsIfReportedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } 
@Test void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .%d.s:d", currentClusterStateVersion, nodeStorage.getIndex())); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanSetUpEvenIfOldWantedStateIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, new NodeState(STORAGE, DOWN), UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeStorageSafeNo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4", result.getReason()); } @Test void testCanUpgradeIfMissingMinReplicationFactor() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, 
HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeIfStorageNodeMissingFromNodeInfo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); String hostInfo = "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + requiredRedundancy + "\n" + " }\n" + " ]\n" + " }\n" + "}\n"; setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testMissingDistributorState() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason()); } private Result transitionToSameState(State state, String oldDescription, String newDescription) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); NodeState currentNodeState = createNodeState(state, oldDescription); NodeState newNodeState = 
createNodeState(state, newDescription); return nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, currentNodeState, newNodeState); } private Result transitionToSameState(String oldDescription, String newDescription) { return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription); } @Test void testSettingUpWhenUpCausesAlreadySet() { Result result = transitionToSameState(UP, "foo", "bar"); assertTrue(result.wantedStateAlreadySet()); } @Test void testSettingAlreadySetState() { Result result = transitionToSameState("foo", "foo"); assertFalse(result.settingWantedStateIsAllowed()); assertTrue(result.wantedStateAlreadySet()); } @Test void testDifferentDescriptionImpliesDenied() { Result result = transitionToSameState("foo", "bar"); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); } private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) { for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo); 
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } } private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) { return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); } @Test void testCanUpgradeWhenAllUp() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenAllUpOrRetired() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenStorageIsDown() { ClusterState clusterState = defaultAllUpClusterState(); var storageNodeIndex = nodeStorage.getIndex(); ContentCluster cluster = createCluster(4); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCannotUpgradeWhenOtherStorageIsDown() { int otherIndex = 2; assertNotEquals(nodeStorage.getIndex(), otherIndex); ContentCluster cluster = createCluster(4); ClusterState clusterState = defaultAllUpClusterState(); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); 
assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Another storage node has state DOWN: 2")); } @Test void testNodeRatioRequirementConsidersGeneratedNodeStates() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDownDisallowedByNonRetiredState() { Result result = evaluateDownTransition( defaultAllUpClusterState(), UP, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason()); } @Test void testDownDisallowedByBuckets() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("The storage node manages 1 buckets", result.getReason()); } @Test void testDownDisallowedByReportedState() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason()); } @Test void testDownDisallowedByVersionMismatch() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0); 
assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1", result.getReason()); } @Test void testAllowedToSetDown() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result evaluateDownTransition(ClusterState clusterState, State reportedState, int hostInfoClusterStateVersion, int lastAlldisksBuckets) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex()); nodeInfo.setReportedState(new NodeState(STORAGE, reportedState), 0); nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets)); return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE); } private ClusterState retiredClusterStateSuffix() { return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r", currentClusterStateVersion, nodeStorage.getIndex())); } private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) { return HostInfo.createHostInfo(String.format("{\n" + " \"metrics\":\n" + " {\n" + " \"snapshot\":\n" + " {\n" + " \"from\":1494940706,\n" + " \"to\":1494940766\n" + " },\n" + " \"values\":\n" + " [\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.buckets\",\n" + " \"description\":\"buckets managed\",\n" + " \"values\":\n" + " {\n" + " \"average\":262144.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":262144,\n" + " \"max\":262144,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.docs\",\n" + " 
\"description\":\"documents stored\",\n" + " \"values\":\n" + " {\n" + " \"average\":154689587.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":154689587,\n" + " \"max\":154689587,\n" + " \"last\":154689587\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":0.0,\n" + " \"sum\":0.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":0,\n" + " \"max\":0,\n" + " \"last\":0\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"global\"\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":129.0,\n" + " \"sum\":129.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":129,\n" + " \"max\":129,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"default\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"cluster-state-version\":%d\n" + "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion)); } private List<ConfiguredNode> createNodes(int count) { List<ConfiguredNode> nodes = new ArrayList<>(); for (int i = 0; i < count; i++) nodes.add(new ConfiguredNode(i, false)); return nodes; } private StorDistributionConfig createDistributionConfig(int nodes, int groups) { if (groups == 1) return createDistributionConfig(nodes); if (nodes % groups != 0) throw new IllegalArgumentException("Cannot have " + groups + " groups with an odd number of nodes: " + nodes); int nodesPerGroup = nodes / groups; var configBuilder = new StorDistributionConfig.Builder() .active_per_leaf_group(true) .ready_copies(groups) .redundancy(groups) .initial_redundancy(groups); configBuilder.group(new 
StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes) .partitions("1|*")); for (int i = 0; i < groups; ++i) { var groupBuilder = new StorDistributionConfig.Group.Builder() .index(String.valueOf(i)) .name(String.valueOf(i)) .capacity(nodesPerGroup) .partitions(""); for (int nodeIndex = 0; nodeIndex < nodesPerGroup; ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); } return configBuilder.build(); } }
I'm not sure whether any of the tests actually care about it, but _technically_(tm) these redundancy values should be equal to `groups` (or an integer multiple of it) to ensure there is at least 1 replica per group. The partition spec should ideally also be group-aware, but that is unlikely to matter for any CC tests (so it can probably be ignored unless something fails at test-time).
/**
 * Builds a grouped distribution config with {@code nodes} spread evenly over
 * {@code groups} leaf groups. Delegates to the flat-cluster variant when
 * {@code groups == 1}.
 *
 * <p>Redundancy, ready-copies and initial redundancy are set to {@code groups}
 * (not a hard-coded 2) so that with {@code active_per_leaf_group(true)} every
 * leaf group is guaranteed at least one replica.
 *
 * @param nodes  total number of content nodes; must be divisible by {@code groups}
 * @param groups number of leaf groups
 * @return the assembled distribution config
 * @throws IllegalArgumentException if {@code nodes} is not divisible by {@code groups}
 */
private StorDistributionConfig createDistributionConfig(int nodes, int groups) {
    if (groups == 1)
        return createDistributionConfig(nodes);
    if (nodes % groups != 0)
        throw new IllegalArgumentException("Cannot have " + groups + " groups with a node count (" + nodes + ") not divisible by the group count");

    int nodesPerGroup = nodes / groups;
    var configBuilder = new StorDistributionConfig.Builder()
            .active_per_leaf_group(true)
            // Must equal (a multiple of) the group count, otherwise some groups
            // would be left without any replicas.
            .ready_copies(groups)
            .redundancy(groups)
            .initial_redundancy(groups);

    // Root group; index/name are placeholders and not used for node resolution.
    configBuilder.group(new StorDistributionConfig.Group.Builder()
            .index("invalid")
            .name("invalid")
            .capacity(nodes)
            .partitions("1|*"));

    // Leaf groups: node indices are distribution keys, so they must be globally
    // unique and consecutive across groups (matching ConfiguredNode indices).
    int nodeIndex = 0;
    for (int i = 0; i < groups; ++i) {
        var groupBuilder = new StorDistributionConfig.Group.Builder()
                .index(String.valueOf(i))
                .name(String.valueOf(i))
                .capacity(nodesPerGroup)
                .partitions("");
        for (int j = 0; j < nodesPerGroup; ++j, ++nodeIndex) {
            groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder()
                    .index(nodeIndex));
        }
        configBuilder.group(groupBuilder);
    }

    return configBuilder.build();
}
.initial_redundancy(2);
/**
 * Builds a grouped distribution config with {@code nodes} spread evenly over
 * {@code groups} leaf groups. Delegates to the flat-cluster variant when
 * {@code groups == 1}.
 *
 * <p>Redundancy, ready-copies and initial redundancy equal {@code groups} so
 * that with {@code active_per_leaf_group(true)} every leaf group holds at
 * least one replica.
 *
 * <p>Node indices are assigned from a single running counter: they are
 * distribution keys and must be globally unique across groups (the previous
 * per-group counter restarted at 0 in each group, giving every group indices
 * 0..nodesPerGroup-1 and leaving the higher-numbered configured nodes in no
 * group at all).
 *
 * @param nodes  total number of content nodes; must be divisible by {@code groups}
 * @param groups number of leaf groups
 * @return the assembled distribution config
 * @throws IllegalArgumentException if {@code nodes} is not divisible by {@code groups}
 */
private StorDistributionConfig createDistributionConfig(int nodes, int groups) {
    if (groups == 1)
        return createDistributionConfig(nodes);
    if (nodes % groups != 0)
        throw new IllegalArgumentException("Cannot have " + groups + " groups with an odd number of nodes: " + nodes);

    int nodesPerGroup = nodes / groups;
    var configBuilder = new StorDistributionConfig.Builder()
            .active_per_leaf_group(true)
            .ready_copies(groups)
            .redundancy(groups)
            .initial_redundancy(groups);

    // Root group; index/name are placeholders and not used for node resolution.
    configBuilder.group(new StorDistributionConfig.Group.Builder()
            .index("invalid")
            .name("invalid")
            .capacity(nodes)
            .partitions("1|*"));

    // Leaf groups with globally unique, consecutive node indices.
    int nodeIndex = 0;
    for (int i = 0; i < groups; ++i) {
        var groupBuilder = new StorDistributionConfig.Group.Builder()
                .index(String.valueOf(i))
                .name(String.valueOf(i))
                .capacity(nodesPerGroup)
                .partitions("");
        for (int j = 0; j < nodesPerGroup; ++j, ++nodeIndex) {
            groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder()
                    .index(nodeIndex));
        }
        configBuilder.group(groupBuilder);
    }

    return configBuilder.build();
}
class NodeStateChangeCheckerTest { private static final int requiredRedundancy = 4; private static final int currentClusterStateVersion = 2; private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1); private static final Node nodeStorage = new Node(STORAGE, 1); private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP); private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator"); private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer"); private static NodeState createNodeState(State state, String description) { return new NodeState(STORAGE, state).setDescription(description); } private static ClusterState clusterState(String state) { try { return new ClusterState(state); } catch (ParseException e) { throw new RuntimeException(e); } } private static ClusterState defaultAllUpClusterState() { return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion)); } private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) { return new NodeStateChangeChecker(cluster, false); } private ContentCluster createCluster(int nodeCount) { return createCluster(nodeCount, 1); } private ContentCluster createCluster(int nodeCount, int groupCount) { Collection<ConfiguredNode> nodes = createNodes(nodeCount); Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount)); return new ContentCluster("Clustername", nodes, distribution); } private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) { return "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + replicationfactor1 + "\n" + " },\n" + " {\n" + " \"node-index\": 1,\n" + " \"min-current-replication-factor\": " + replicationfactor2 + "\n" + " },\n" + " {\n" + " 
\"node-index\": 2,\n" + " \"min-current-replication-factor\": " + replicationfactor3 + "\n" + " },\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]\n" + " }\n" + "}\n"; } private void markAllNodesAsReportingStateUp(ContentCluster cluster) { final ClusterInfo clusterInfo = cluster.clusterInfo(); final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size(); for (int i = 0; i < configuredNodeCount; i++) { clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0); } } @Test void testCanUpgradeForce() { var nodeStateChangeChecker = createChangeChecker(createCluster(1)); NodeState newState = new NodeState(STORAGE, INITIALIZING); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), FORCE, UP_NODE_STATE, newState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDeniedInMoratorium() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason()); } @Test void testUnknownStorageNode() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); 
assertFalse(result.wantedStateAlreadySet()); assertEquals("Unknown node storage.10", result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(DISTRIBUTOR, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(STORAGE, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker 
// NOTE(review): source is machine-collapsed onto single long lines; code tokens below
// are kept byte-identical, only standalone comment lines are added.
// This line completes testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown (two
// sub-cases: node in the other group denied with a group-level reason; node in the
// same group denied because another distributor wants DOWN), then starts
// testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended (other group denied,
// same group allowed -- its final assertTrue continues on the next line).
= new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other distributor 0 in group 0 has wanted state Down", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.getReason()); assertEquals("Another distributor wants state DOWN: 0", result.getReason()); } } @Test void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), 
// Completion of the assertTrue above, then: testSafeSetStateDistributors (safe-set is
// storage-only), testCanUpgradeSafeMissingStorage (another storage node DOWN blocks
// the transition), testCanUpgradeStorageSafeYes, and testSetUpFailsIfReportedIsDown.
result.getReason()); assertFalse(result.wantedStateAlreadySet()); } } @Test void testSafeSetStateDistributors() { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1)); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes")); } @Test void testCanUpgradeSafeMissingStorage() { ContentCluster cluster = createCluster(4); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, DOWN), 0); ClusterState clusterStateWith3Down = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterStateWith3Down, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Another storage node has state DOWN: 3", result.getReason()); } @Test void testCanUpgradeStorageSafeYes() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpFailsIfReportedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } 
// NOTE(review): machine-collapsed source; code tokens kept byte-identical, comments added.
// Tests for setting a node back UP, and the "safe no" case where distributor-reported
// min replication factor (3) is below requiredRedundancy (4). The trailing
// setAllNodesUp( call is completed on the next source line.
@Test void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .%d.s:d", currentClusterStateVersion, nodeStorage.getIndex())); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanSetUpEvenIfOldWantedStateIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, new NodeState(STORAGE, DOWN), UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeStorageSafeNo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4", result.getReason()); } @Test void testCanUpgradeIfMissingMinReplicationFactor() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, 
// Completes the setAllNodesUp call above, then: node 3 with no reported replication
// factor is allowed; a storage node absent from the distributor host-info is allowed;
// a distributor with no reported cluster-state version blocks the transition. Ends
// with the head of the private transitionToSameState helper (its last assignment is
// completed on the next source line).
HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeIfStorageNodeMissingFromNodeInfo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); String hostInfo = "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + requiredRedundancy + "\n" + " }\n" + " ]\n" + " }\n" + "}\n"; setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testMissingDistributorState() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason()); } private Result transitionToSameState(State state, String oldDescription, String newDescription) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); NodeState currentNodeState = createNodeState(state, oldDescription); NodeState newNodeState = 
// Completes transitionToSameState, its two-arg overload (defaults to MAINTENANCE),
// same-state/already-set tests, and the private helpers
// transitionToMaintenanceWithOneStorageNodeDown and setAllNodesUp (the latter's
// loop body is completed on the next source line).
createNodeState(state, newDescription); return nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, currentNodeState, newNodeState); } private Result transitionToSameState(String oldDescription, String newDescription) { return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription); } @Test void testSettingUpWhenUpCausesAlreadySet() { Result result = transitionToSameState(UP, "foo", "bar"); assertTrue(result.wantedStateAlreadySet()); } @Test void testSettingAlreadySetState() { Result result = transitionToSameState("foo", "foo"); assertFalse(result.settingWantedStateIsAllowed()); assertTrue(result.wantedStateAlreadySet()); } @Test void testDifferentDescriptionImpliesDenied() { Result result = transitionToSameState("foo", "bar"); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); } private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) { for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo); 
// Completes setAllNodesUp; transitionToMaintenanceWithNoStorageNodesDown is a plain
// delegate (despite the name, it forwards unchanged). Upgrade-allowed tests for
// all-up and own-node-down, then the start of testCannotUpgradeWhenOtherStorageIsDown
// (its assertions continue on the next source line).
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } } private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) { return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); } @Test void testCanUpgradeWhenAllUp() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenAllUpOrRetired() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenStorageIsDown() { ClusterState clusterState = defaultAllUpClusterState(); var storageNodeIndex = nodeStorage.getIndex(); ContentCluster cluster = createCluster(4); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCannotUpgradeWhenOtherStorageIsDown() { int otherIndex = 2; assertNotEquals(nodeStorage.getIndex(), otherIndex); ContentCluster cluster = createCluster(4); ClusterState clusterState = defaultAllUpClusterState(); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); 
// NOTE(review): machine-collapsed source; code tokens kept byte-identical, comments added.
// Completes testCannotUpgradeWhenOtherStorageIsDown, then the safe-DOWN tests: DOWN is
// only allowed for retired nodes, with zero managed buckets, reported state UP, and a
// matching cluster-state version. The trailing evaluateDownTransition call is
// completed on the next source line.
assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Another storage node has state DOWN: 2")); } @Test void testNodeRatioRequirementConsidersGeneratedNodeStates() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDownDisallowedByNonRetiredState() { Result result = evaluateDownTransition( defaultAllUpClusterState(), UP, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason()); } @Test void testDownDisallowedByBuckets() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("The storage node manages 1 buckets", result.getReason()); } @Test void testDownDisallowedByReportedState() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason()); } @Test void testDownDisallowedByVersionMismatch() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0); 
// testAllowedToSetDown (happy path), the evaluateDownTransition driver,
// retiredClusterStateSuffix, and the start of createHostInfoWithMetrics -- a JSON
// host-info fixture whose string literal continues on the next source line.
assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1", result.getReason()); } @Test void testAllowedToSetDown() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result evaluateDownTransition(ClusterState clusterState, State reportedState, int hostInfoClusterStateVersion, int lastAlldisksBuckets) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex()); nodeInfo.setReportedState(new NodeState(STORAGE, reportedState), 0); nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets)); return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE); } private ClusterState retiredClusterStateSuffix() { return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r", currentClusterStateVersion, nodeStorage.getIndex())); } private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) { return HostInfo.createHostInfo(String.format("{\n" + " \"metrics\":\n" + " {\n" + " \"snapshot\":\n" + " {\n" + " \"from\":1494940706,\n" + " \"to\":1494940766\n" + " },\n" + " \"values\":\n" + " [\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.buckets\",\n" + " \"description\":\"buckets managed\",\n" + " \"values\":\n" + " {\n" + " \"average\":262144.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":262144,\n" + " \"max\":262144,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.docs\",\n" + " 
// Remainder of the metrics JSON (the two %d placeholders take lastAlldisksBuckets,
// the final one clusterStateVersion), then createNodes and the single-group
// createDistributionConfig(int) used by this copy of the class.
\"description\":\"documents stored\",\n" + " \"values\":\n" + " {\n" + " \"average\":154689587.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":154689587,\n" + " \"max\":154689587,\n" + " \"last\":154689587\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":0.0,\n" + " \"sum\":0.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":0,\n" + " \"max\":0,\n" + " \"last\":0\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"global\"\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":129.0,\n" + " \"sum\":129.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":129,\n" + " \"max\":129,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"default\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"cluster-state-version\":%d\n" + "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion)); } private List<ConfiguredNode> createNodes(int count) { List<ConfiguredNode> nodes = new ArrayList<>(); for (int i = 0; i < count; i++) nodes.add(new ConfiguredNode(i, false)); return nodes; } private StorDistributionConfig createDistributionConfig(int nodes) { var configBuilder = new StorDistributionConfig.Builder() .ready_copies(requiredRedundancy) .redundancy(requiredRedundancy) .initial_redundancy(requiredRedundancy); var groupBuilder = new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes); int nodeIndex = 0; for (int j = 0; j < nodes; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } 
// Completes createDistributionConfig and closes this copy of the class.
configBuilder.group(groupBuilder); return configBuilder.build(); } }
// NOTE(review): machine-collapsed source; code tokens kept byte-identical, comments added.
// This line opens a second copy of NodeStateChangeCheckerTest (the chunk contains the
// class duplicated -- presumably two revisions concatenated; verify against VCS).
// Constants, node/state fixtures, cluster/checker factories (this copy's
// createDistributionConfig takes a groupCount), and the head of
// createDistributorHostInfo whose JSON literal continues on the next source line.
class NodeStateChangeCheckerTest { private static final int requiredRedundancy = 4; private static final int currentClusterStateVersion = 2; private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1); private static final Node nodeStorage = new Node(STORAGE, 1); private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP); private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator"); private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer"); private static NodeState createNodeState(State state, String description) { return new NodeState(STORAGE, state).setDescription(description); } private static ClusterState clusterState(String state) { try { return new ClusterState(state); } catch (ParseException e) { throw new RuntimeException(e); } } private static ClusterState defaultAllUpClusterState() { return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion)); } private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) { return new NodeStateChangeChecker(cluster, false); } private ContentCluster createCluster(int nodeCount) { return createCluster(nodeCount, 1); } private ContentCluster createCluster(int nodeCount, int groupCount) { Collection<ConfiguredNode> nodes = createNodes(nodeCount); Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount)); return new ContentCluster("Clustername", nodes, distribution); } private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) { return "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + replicationfactor1 + "\n" + " },\n" + " {\n" + " \"node-index\": 1,\n" + " \"min-current-replication-factor\": " + replicationfactor2 + "\n" + " },\n" + " {\n" + " 
// Completes createDistributorHostInfo, then markAllNodesAsReportingStateUp and the
// first tests of this copy: FORCE is always allowed, moratorium denies, and the head
// of testUnknownStorageNode (its last assertions continue on the next source line).
\"node-index\": 2,\n" + " \"min-current-replication-factor\": " + replicationfactor3 + "\n" + " },\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]\n" + " }\n" + "}\n"; } private void markAllNodesAsReportingStateUp(ContentCluster cluster) { final ClusterInfo clusterInfo = cluster.clusterInfo(); final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size(); for (int i = 0; i < configuredNodeCount; i++) { clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0); } } @Test void testCanUpgradeForce() { var nodeStateChangeChecker = createChangeChecker(createCluster(1)); NodeState newState = new NodeState(STORAGE, INITIALIZING); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), FORCE, UP_NODE_STATE, newState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDeniedInMoratorium() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason()); } @Test void testUnknownStorageNode() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); 
assertFalse(result.wantedStateAlreadySet()); assertEquals("Unknown node storage.10", result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(DISTRIBUTOR, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getDistributorNodeInfo(0) .setWantedState(new NodeState(STORAGE, DOWN).setDescription("Orchestrator")); var nodeStateChangeChecker 
// NOTE(review): machine-collapsed source; code tokens kept byte-identical, comments added.
// Duplicate-copy continuation: completes testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown
// (other-group denied with group reason; same-group denied because another distributor
// wants DOWN), then testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended (other
// group denied, same group allowed -- final assertTrue continues on the next line).
= new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other distributor 0 in group 0 has wanted state Down", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.getReason()); assertEquals("Another distributor wants state DOWN: 0", result.getReason()); } } @Test void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended() { ContentCluster cluster = createCluster(4, 2); cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(STORAGE, State.MAINTENANCE).setDescription("Orchestrator")); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), 
// Distributor safe-set rejected; another storage node DOWN blocks safe maintenance;
// happy path; and setting UP fails while the node reports DOWN.
result.getReason()); assertFalse(result.wantedStateAlreadySet()); } } @Test void testSafeSetStateDistributors() { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1)); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes")); } @Test void testCanUpgradeSafeMissingStorage() { ContentCluster cluster = createCluster(4); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, DOWN), 0); ClusterState clusterStateWith3Down = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterStateWith3Down, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Another storage node has state DOWN: 3", result.getReason()); } @Test void testCanUpgradeStorageSafeYes() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpFailsIfReportedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } 
// Set-up/upgrade tests of the duplicate copy; the trailing setAllNodesUp( call is
// completed on the next source line.
@Test void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .%d.s:d", currentClusterStateVersion, nodeStorage.getIndex())); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanSetUpEvenIfOldWantedStateIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, new NodeState(STORAGE, DOWN), UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeStorageSafeNo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4", result.getReason()); } @Test void testCanUpgradeIfMissingMinReplicationFactor() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, 
// Final visible line of the chunk; transitionToSameState is cut off mid-declaration
// here (the rest of the method lies beyond this chunk), so it is preserved verbatim.
HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeIfStorageNodeMissingFromNodeInfo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); String hostInfo = "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + requiredRedundancy + "\n" + " }\n" + " ]\n" + " }\n" + "}\n"; setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testMissingDistributorState() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason()); } private Result transitionToSameState(State state, String oldDescription, String newDescription) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); NodeState currentNodeState = createNodeState(state, oldDescription); NodeState newNodeState = 
createNodeState(state, newDescription); return nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, currentNodeState, newNodeState); } private Result transitionToSameState(String oldDescription, String newDescription) { return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription); } @Test void testSettingUpWhenUpCausesAlreadySet() { Result result = transitionToSameState(UP, "foo", "bar"); assertTrue(result.wantedStateAlreadySet()); } @Test void testSettingAlreadySetState() { Result result = transitionToSameState("foo", "foo"); assertFalse(result.settingWantedStateIsAllowed()); assertTrue(result.wantedStateAlreadySet()); } @Test void testDifferentDescriptionImpliesDenied() { Result result = transitionToSameState("foo", "bar"); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); } private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) { for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo); 
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } } private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) { return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); } @Test void testCanUpgradeWhenAllUp() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenAllUpOrRetired() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenStorageIsDown() { ClusterState clusterState = defaultAllUpClusterState(); var storageNodeIndex = nodeStorage.getIndex(); ContentCluster cluster = createCluster(4); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCannotUpgradeWhenOtherStorageIsDown() { int otherIndex = 2; assertNotEquals(nodeStorage.getIndex(), otherIndex); ContentCluster cluster = createCluster(4); ClusterState clusterState = defaultAllUpClusterState(); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); 
assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Another storage node has state DOWN: 2")); } @Test void testNodeRatioRequirementConsidersGeneratedNodeStates() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDownDisallowedByNonRetiredState() { Result result = evaluateDownTransition( defaultAllUpClusterState(), UP, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason()); } @Test void testDownDisallowedByBuckets() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("The storage node manages 1 buckets", result.getReason()); } @Test void testDownDisallowedByReportedState() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason()); } @Test void testDownDisallowedByVersionMismatch() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0); 
assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1", result.getReason()); } @Test void testAllowedToSetDown() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result evaluateDownTransition(ClusterState clusterState, State reportedState, int hostInfoClusterStateVersion, int lastAlldisksBuckets) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex()); nodeInfo.setReportedState(new NodeState(STORAGE, reportedState), 0); nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets)); return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE); } private ClusterState retiredClusterStateSuffix() { return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r", currentClusterStateVersion, nodeStorage.getIndex())); } private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) { return HostInfo.createHostInfo(String.format("{\n" + " \"metrics\":\n" + " {\n" + " \"snapshot\":\n" + " {\n" + " \"from\":1494940706,\n" + " \"to\":1494940766\n" + " },\n" + " \"values\":\n" + " [\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.buckets\",\n" + " \"description\":\"buckets managed\",\n" + " \"values\":\n" + " {\n" + " \"average\":262144.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":262144,\n" + " \"max\":262144,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.docs\",\n" + " 
\"description\":\"documents stored\",\n" + " \"values\":\n" + " {\n" + " \"average\":154689587.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":154689587,\n" + " \"max\":154689587,\n" + " \"last\":154689587\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":0.0,\n" + " \"sum\":0.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":0,\n" + " \"max\":0,\n" + " \"last\":0\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"global\"\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":129.0,\n" + " \"sum\":129.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":129,\n" + " \"max\":129,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"default\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"cluster-state-version\":%d\n" + "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion)); } private List<ConfiguredNode> createNodes(int count) { List<ConfiguredNode> nodes = new ArrayList<>(); for (int i = 0; i < count; i++) nodes.add(new ConfiguredNode(i, false)); return nodes; } private StorDistributionConfig createDistributionConfig(int nodes) { var configBuilder = new StorDistributionConfig.Builder() .ready_copies(requiredRedundancy) .redundancy(requiredRedundancy) .initial_redundancy(requiredRedundancy); var groupBuilder = new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes); int nodeIndex = 0; for (int j = 0; j < nodes; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } 
configBuilder.group(groupBuilder); return configBuilder.build(); } }
`successFactorDeviation` is in the range [-successFactorBaseline, 1 - successFactorBaseline]: it equals -successFactorBaseline when every attempt failed, so that (not 1.0) is the value to compare against to detect total failure.
protected double maintain() {
    // Pushes a traffic-share / BCP-group-info patch to the node repository for every
    // production deployment of every instance, and returns the success factor deviation
    // computed from the attempt/failure counts.
    Exception lastException = null;
    int attempts = 0;
    int failures = 0;
    var metrics = collectClusterMetrics();
    for (var application : applications.asList()) {
        for (var instance : application.instances().values()) {
            for (var deployment : instance.productionDeployments().values()) {
                if (shuttingDown()) return 0.0;
                try {
                    attempts++;
                    var bcpGroups = BcpGroup.groupsFrom(instance, application.deploymentSpec());
                    var patch = new ApplicationPatch();
                    addTrafficShare(deployment, bcpGroups, patch);
                    addBcpGroupInfo(deployment.zone().region(), metrics.get(instance.id()), bcpGroups, patch);
                    nodeRepository.patchApplication(deployment.zone(), instance.id(), patch);
                }
                catch (Exception e) {
                    failures++;
                    lastException = e;
                }
            }
        }
    }
    double successFactorDeviation = asSuccessFactorDeviation(attempts, failures);
    // The deviation lies in [-baseline, 1 - baseline] (see review note), so comparing it
    // against 1.0 could never detect total failure, and a positive threshold had the wrong
    // sign. Decide from the raw counters instead, which is baseline-independent.
    if (attempts > 0 && failures == attempts)
        log.log(Level.WARNING, "Could not update traffic share on any applications", lastException);
    else if (failures > attempts * 0.1)
        log.log(Level.FINE, "Could not update traffic share on all applications", lastException);
    return successFactorDeviation;
}
log.log(Level.FINE, "Could not update traffic share on all applications", lastException);
protected double maintain() {
    // Pushes a traffic-share / BCP-group-info patch to the node repository for every
    // production deployment of every instance, and returns the success factor deviation.
    var clusterMetrics = collectClusterMetrics();
    Exception latestFailure = null;
    int attemptCount = 0;
    int failureCount = 0;
    for (var application : applications.asList()) {
        for (var instance : application.instances().values()) {
            for (var deployment : instance.productionDeployments().values()) {
                if (shuttingDown()) return 0.0;
                attemptCount++;
                try {
                    var bcpGroups = BcpGroup.groupsFrom(instance, application.deploymentSpec());
                    var patch = new ApplicationPatch();
                    addTrafficShare(deployment, bcpGroups, patch);
                    addBcpGroupInfo(deployment.zone().region(), clusterMetrics.get(instance.id()), bcpGroups, patch);
                    nodeRepository.patchApplication(deployment.zone(), instance.id(), patch);
                }
                catch (Exception e) {
                    failureCount++;
                    latestFailure = e;
                }
            }
        }
    }
    double deviation = asSuccessFactorDeviation(attemptCount, failureCount);
    // The deviation bottoms out at -successFactorBaseline when every attempt failed.
    if (deviation == -successFactorBaseline)
        log.log(Level.WARNING, "Could not update traffic share on any applications", latestFailure);
    else if (deviation < -0.1)
        log.log(Level.FINE, "Could not update traffic share on all applications", latestFailure);
    return deviation;
}
class BcpGroupUpdater extends ControllerMaintainer { private final ApplicationController applications; private final NodeRepository nodeRepository; public BcpGroupUpdater(Controller controller, Duration duration, Double successFactorBaseline) { super(controller, duration, successFactorBaseline); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override /** Adds deployment traffic share to the given patch. */ private void addTrafficShare(Deployment deployment, List<BcpGroup> bcpGroups, ApplicationPatch patch) { double currentReadShare = 0; double maxReadShare = 0; for (BcpGroup group : bcpGroups) { if ( ! group.contains(deployment.zone().region())) continue; double deploymentQps = deployment.metrics().queriesPerSecond(); double groupQps = group.totalQps(); double fraction = group.fraction(deployment.zone().region()); currentReadShare += groupQps == 0 ? 0 : fraction * deploymentQps / groupQps; maxReadShare += group.size() == 1 ? currentReadShare : fraction * ( deploymentQps + group.maxQpsExcluding(deployment.zone().region()) / (group.size() - 1) ) / groupQps; } patch.currentReadShare = currentReadShare; patch.maxReadShare = maxReadShare; } private Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> collectClusterMetrics() { Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> metrics = new HashMap<>(); for (var deploymentEntry : new HashMap<>(controller().applications().deploymentInfo()).entrySet()) { if ( ! 
deploymentEntry.getKey().zoneId().environment().isProduction()) continue; var appEntry = metrics.computeIfAbsent(deploymentEntry.getKey().applicationId(), __ -> new HashMap<>()); for (var clusterEntry : deploymentEntry.getValue().clusters().entrySet()) { var clusterMetrics = appEntry.computeIfAbsent(clusterEntry.getKey(), __ -> new ClusterDeploymentMetrics()); clusterMetrics.put(deploymentEntry.getKey().zoneId().region(), new DeploymentMetrics(clusterEntry.getValue().target().metrics().queryRate(), clusterEntry.getValue().target().metrics().growthRateHeadroom(), clusterEntry.getValue().target().metrics().cpuCostPerQuery())); } } return metrics; } /** Adds bcp group info to the given patch, for any clusters where we have information. */ private void addBcpGroupInfo(RegionName regionToUpdate, Map<ClusterSpec.Id, ClusterDeploymentMetrics> metrics, List<BcpGroup> bcpGroups, ApplicationPatch patch) { if (metrics == null) return; for (var clusterEntry : metrics.entrySet()) { addClusterBcpGroupInfo(clusterEntry.getKey(), clusterEntry.getValue(), regionToUpdate, bcpGroups, patch); } } private void addClusterBcpGroupInfo(ClusterSpec.Id id, ClusterDeploymentMetrics metrics, RegionName regionToUpdate, List<BcpGroup> bcpGroups, ApplicationPatch patch) { var weightedSumOfMaxMetrics = DeploymentMetrics.empty(); double sumOfCompleteMemberships = 0; for (BcpGroup bcpGroup : bcpGroups) { if ( ! bcpGroup.contains(regionToUpdate)) continue; var groupMetrics = metrics.subsetOf(bcpGroup); if ( ! 
groupMetrics.isCompleteExcluding(regionToUpdate, bcpGroup)) continue; var max = groupMetrics.maxQueryRateExcluding(regionToUpdate, bcpGroup); if (max.isEmpty()) continue; weightedSumOfMaxMetrics = weightedSumOfMaxMetrics.add(max.get().multipliedBy(bcpGroup.fraction(regionToUpdate))); sumOfCompleteMemberships += bcpGroup.fraction(regionToUpdate); } if (sumOfCompleteMemberships > 0) patch.clusters.put(id.value(), weightedSumOfMaxMetrics.dividedBy(sumOfCompleteMemberships).asClusterPatch()); } /** * A set of regions which will take over traffic from each other if one of them fails. * Each region will take an equal share (modulated by fraction) of the failing region's traffic. * * A regions membership in a group may be partial, represented by a fraction [0, 1], * in which case the other regions will collectively only take that fraction of the failing regions traffic, * and symmetrically, the region will only take its fraction of its share of traffic of any other failing region. */ private static class BcpGroup { /** The instance which has this group. */ private final Instance instance; /** Regions in this group, with their fractions. */ private final Map<RegionName, Double> regions; /** Creates a group of a subset of the deployments in this instance. */ private BcpGroup(Instance instance, Map<RegionName, Double> regions) { this.instance = instance; this.regions = regions; } /** Returns the sum of the fractional memberships of this. */ double size() { return regions.values().stream().mapToDouble(f -> f).sum(); } Set<RegionName> regions() { return regions.keySet(); } double fraction(RegionName region) { return regions.getOrDefault(region, 0.0); } boolean contains(RegionName region) { return regions.containsKey(region); } double totalQps() { return instance.productionDeployments().values().stream() .mapToDouble(i -> i.metrics().queriesPerSecond()).sum(); } double maxQpsExcluding(RegionName region) { return instance.productionDeployments().values().stream() .filter(d -> ! 
d.zone().region().equals(region)) .mapToDouble(d -> d.metrics().queriesPerSecond() * fraction(d.zone().region())) .max() .orElse(0); } private static Bcp bcpOf(InstanceName instanceName, DeploymentSpec deploymentSpec) { var instanceSpec = deploymentSpec.instance(instanceName); if (instanceSpec.isEmpty()) return deploymentSpec.bcp(); return instanceSpec.get().bcp().orElse(deploymentSpec.bcp()); } private static Map<RegionName, Double> regionsFrom(Instance instance) { return instance.productionDeployments().values().stream() .collect(Collectors.toMap(deployment -> deployment.zone().region(), __ -> 1.0)); } private static Map<RegionName, Double> regionsFrom(Bcp.Group groupSpec) { return groupSpec.members().stream() .collect(Collectors.toMap(member -> member.region(), member -> member.fraction())); } static List<BcpGroup> groupsFrom(Instance instance, DeploymentSpec deploymentSpec) { Bcp bcp = bcpOf(instance.name(), deploymentSpec); if (bcp.isEmpty()) return List.of(new BcpGroup(instance, regionsFrom(instance))); return bcp.groups().stream().map(groupSpec -> new BcpGroup(instance, regionsFrom(groupSpec))).toList(); } } record ApplicationClusterKey(ApplicationId application, ClusterSpec.Id cluster) { } static class ClusterDeploymentMetrics { private final Map<RegionName, DeploymentMetrics> deploymentMetrics; public ClusterDeploymentMetrics() { this.deploymentMetrics = new ConcurrentHashMap<>(); } public ClusterDeploymentMetrics(Map<RegionName, DeploymentMetrics> deploymentMetrics) { this.deploymentMetrics = new ConcurrentHashMap<>(deploymentMetrics); } void put(RegionName region, DeploymentMetrics metrics) { deploymentMetrics.put(region, metrics); } ClusterDeploymentMetrics subsetOf(BcpGroup group) { Map<RegionName, DeploymentMetrics> filteredMetrics = new HashMap<>(); for (var entry : deploymentMetrics.entrySet()) { if (group.contains(entry.getKey())) filteredMetrics.put(entry.getKey(), entry.getValue()); } return new ClusterDeploymentMetrics(filteredMetrics); } /** 
Returns whether this has deployment metrics for each of the deployments in the given instance. */ boolean isCompleteExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup).allMatch(region -> deploymentMetrics.containsKey(region)); } /** Returns the metrics with the max query rate among the given instance, if any. */ Optional<DeploymentMetrics> maxQueryRateExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup) .map(region -> deploymentMetrics.get(region)) .max(Comparator.comparingDouble(m -> m.queryRate)); } private Stream<RegionName> regionsExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return bcpGroup.regions().stream() .filter(region -> ! region.equals(regionToExclude)); } } /** Metrics for a given application, cluster and deployment. */ record DeploymentMetrics(double queryRate, double growthRateHeadroom, double cpuCostPerQuery) { public ApplicationPatch.ClusterPatch asClusterPatch() { return new ApplicationPatch.ClusterPatch(new ApplicationPatch.BcpGroupInfo(queryRate, growthRateHeadroom, cpuCostPerQuery)); } DeploymentMetrics dividedBy(double d) { return new DeploymentMetrics(queryRate / d, growthRateHeadroom / d, cpuCostPerQuery / d); } DeploymentMetrics multipliedBy(double m) { return new DeploymentMetrics(queryRate * m, growthRateHeadroom * m, cpuCostPerQuery * m); } DeploymentMetrics add(DeploymentMetrics other) { return new DeploymentMetrics(queryRate + other.queryRate, growthRateHeadroom + other.growthRateHeadroom, cpuCostPerQuery + other.cpuCostPerQuery); } public static DeploymentMetrics empty() { return new DeploymentMetrics(0, 0, 0); } } }
class BcpGroupUpdater extends ControllerMaintainer { private final ApplicationController applications; private final NodeRepository nodeRepository; private final Double successFactorBaseline; public BcpGroupUpdater(Controller controller, Duration duration, Double successFactorBaseline) { super(controller, duration, successFactorBaseline); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); this.successFactorBaseline = successFactorBaseline; } @Override /** Adds deployment traffic share to the given patch. */ private void addTrafficShare(Deployment deployment, List<BcpGroup> bcpGroups, ApplicationPatch patch) { double currentReadShare = 0; double maxReadShare = 0; for (BcpGroup group : bcpGroups) { if ( ! group.contains(deployment.zone().region())) continue; double deploymentQps = deployment.metrics().queriesPerSecond(); double groupQps = group.totalQps(); double fraction = group.fraction(deployment.zone().region()); currentReadShare += groupQps == 0 ? 0 : fraction * deploymentQps / groupQps; maxReadShare += group.size() == 1 ? currentReadShare : fraction * ( deploymentQps + group.maxQpsExcluding(deployment.zone().region()) / (group.size() - 1) ) / groupQps; } patch.currentReadShare = currentReadShare; patch.maxReadShare = maxReadShare; } private Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> collectClusterMetrics() { Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> metrics = new HashMap<>(); for (var deploymentEntry : new HashMap<>(controller().applications().deploymentInfo()).entrySet()) { if ( ! 
deploymentEntry.getKey().zoneId().environment().isProduction()) continue; var appEntry = metrics.computeIfAbsent(deploymentEntry.getKey().applicationId(), __ -> new HashMap<>()); for (var clusterEntry : deploymentEntry.getValue().clusters().entrySet()) { var clusterMetrics = appEntry.computeIfAbsent(clusterEntry.getKey(), __ -> new ClusterDeploymentMetrics()); clusterMetrics.put(deploymentEntry.getKey().zoneId().region(), new DeploymentMetrics(clusterEntry.getValue().target().metrics().queryRate(), clusterEntry.getValue().target().metrics().growthRateHeadroom(), clusterEntry.getValue().target().metrics().cpuCostPerQuery())); } } return metrics; } /** Adds bcp group info to the given patch, for any clusters where we have information. */ private void addBcpGroupInfo(RegionName regionToUpdate, Map<ClusterSpec.Id, ClusterDeploymentMetrics> metrics, List<BcpGroup> bcpGroups, ApplicationPatch patch) { if (metrics == null) return; for (var clusterEntry : metrics.entrySet()) { addClusterBcpGroupInfo(clusterEntry.getKey(), clusterEntry.getValue(), regionToUpdate, bcpGroups, patch); } } private void addClusterBcpGroupInfo(ClusterSpec.Id id, ClusterDeploymentMetrics metrics, RegionName regionToUpdate, List<BcpGroup> bcpGroups, ApplicationPatch patch) { var weightedSumOfMaxMetrics = DeploymentMetrics.empty(); double sumOfCompleteMemberships = 0; for (BcpGroup bcpGroup : bcpGroups) { if ( ! bcpGroup.contains(regionToUpdate)) continue; var groupMetrics = metrics.subsetOf(bcpGroup); if ( ! 
groupMetrics.isCompleteExcluding(regionToUpdate, bcpGroup)) continue; var max = groupMetrics.maxQueryRateExcluding(regionToUpdate, bcpGroup); if (max.isEmpty()) continue; weightedSumOfMaxMetrics = weightedSumOfMaxMetrics.add(max.get().multipliedBy(bcpGroup.fraction(regionToUpdate))); sumOfCompleteMemberships += bcpGroup.fraction(regionToUpdate); } if (sumOfCompleteMemberships > 0) patch.clusters.put(id.value(), weightedSumOfMaxMetrics.dividedBy(sumOfCompleteMemberships).asClusterPatch()); } /** * A set of regions which will take over traffic from each other if one of them fails. * Each region will take an equal share (modulated by fraction) of the failing region's traffic. * * A regions membership in a group may be partial, represented by a fraction [0, 1], * in which case the other regions will collectively only take that fraction of the failing regions traffic, * and symmetrically, the region will only take its fraction of its share of traffic of any other failing region. */ private static class BcpGroup { /** The instance which has this group. */ private final Instance instance; /** Regions in this group, with their fractions. */ private final Map<RegionName, Double> regions; /** Creates a group of a subset of the deployments in this instance. */ private BcpGroup(Instance instance, Map<RegionName, Double> regions) { this.instance = instance; this.regions = regions; } /** Returns the sum of the fractional memberships of this. */ double size() { return regions.values().stream().mapToDouble(f -> f).sum(); } Set<RegionName> regions() { return regions.keySet(); } double fraction(RegionName region) { return regions.getOrDefault(region, 0.0); } boolean contains(RegionName region) { return regions.containsKey(region); } double totalQps() { return instance.productionDeployments().values().stream() .mapToDouble(i -> i.metrics().queriesPerSecond()).sum(); } double maxQpsExcluding(RegionName region) { return instance.productionDeployments().values().stream() .filter(d -> ! 
d.zone().region().equals(region)) .mapToDouble(d -> d.metrics().queriesPerSecond() * fraction(d.zone().region())) .max() .orElse(0); } private static Bcp bcpOf(InstanceName instanceName, DeploymentSpec deploymentSpec) { var instanceSpec = deploymentSpec.instance(instanceName); if (instanceSpec.isEmpty()) return deploymentSpec.bcp(); return instanceSpec.get().bcp().orElse(deploymentSpec.bcp()); } private static Map<RegionName, Double> regionsFrom(Instance instance) { return instance.productionDeployments().values().stream() .collect(Collectors.toMap(deployment -> deployment.zone().region(), __ -> 1.0)); } private static Map<RegionName, Double> regionsFrom(Bcp.Group groupSpec) { return groupSpec.members().stream() .collect(Collectors.toMap(member -> member.region(), member -> member.fraction())); } static List<BcpGroup> groupsFrom(Instance instance, DeploymentSpec deploymentSpec) { Bcp bcp = bcpOf(instance.name(), deploymentSpec); if (bcp.isEmpty()) return List.of(new BcpGroup(instance, regionsFrom(instance))); return bcp.groups().stream().map(groupSpec -> new BcpGroup(instance, regionsFrom(groupSpec))).toList(); } } record ApplicationClusterKey(ApplicationId application, ClusterSpec.Id cluster) { } static class ClusterDeploymentMetrics { private final Map<RegionName, DeploymentMetrics> deploymentMetrics; public ClusterDeploymentMetrics() { this.deploymentMetrics = new ConcurrentHashMap<>(); } public ClusterDeploymentMetrics(Map<RegionName, DeploymentMetrics> deploymentMetrics) { this.deploymentMetrics = new ConcurrentHashMap<>(deploymentMetrics); } void put(RegionName region, DeploymentMetrics metrics) { deploymentMetrics.put(region, metrics); } ClusterDeploymentMetrics subsetOf(BcpGroup group) { Map<RegionName, DeploymentMetrics> filteredMetrics = new HashMap<>(); for (var entry : deploymentMetrics.entrySet()) { if (group.contains(entry.getKey())) filteredMetrics.put(entry.getKey(), entry.getValue()); } return new ClusterDeploymentMetrics(filteredMetrics); } /** 
Returns whether this has deployment metrics for each of the deployments in the given instance. */ boolean isCompleteExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup).allMatch(region -> deploymentMetrics.containsKey(region)); } /** Returns the metrics with the max query rate among the given instance, if any. */ Optional<DeploymentMetrics> maxQueryRateExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup) .map(region -> deploymentMetrics.get(region)) .max(Comparator.comparingDouble(m -> m.queryRate)); } private Stream<RegionName> regionsExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return bcpGroup.regions().stream() .filter(region -> ! region.equals(regionToExclude)); } } /** Metrics for a given application, cluster and deployment. */ record DeploymentMetrics(double queryRate, double growthRateHeadroom, double cpuCostPerQuery) { public ApplicationPatch.ClusterPatch asClusterPatch() { return new ApplicationPatch.ClusterPatch(new ApplicationPatch.BcpGroupInfo(queryRate, growthRateHeadroom, cpuCostPerQuery)); } DeploymentMetrics dividedBy(double d) { return new DeploymentMetrics(queryRate / d, growthRateHeadroom / d, cpuCostPerQuery / d); } DeploymentMetrics multipliedBy(double m) { return new DeploymentMetrics(queryRate * m, growthRateHeadroom * m, cpuCostPerQuery * m); } DeploymentMetrics add(DeploymentMetrics other) { return new DeploymentMetrics(queryRate + other.queryRate, growthRateHeadroom + other.growthRateHeadroom, cpuCostPerQuery + other.cpuCostPerQuery); } public static DeploymentMetrics empty() { return new DeploymentMetrics(0, 0, 0); } } }
Consider using format specifiers (e.g. `%d`) for all interpolated values here, instead of mixing string concatenation into a `String.format` call.
/**
 * Returns a cluster state at {@code currentClusterStateVersion} in which all
 * {@code nodeCount} distributor nodes and all {@code nodeCount} storage nodes are up.
 *
 * @param nodeCount number of distributor and storage nodes in the state
 */
private static ClusterState defaultAllUpClusterState(int nodeCount) {
    // Pass every value as a format argument instead of mixing string
    // concatenation ("distributor:" + nodeCount) with String.format.
    return clusterState(String.format("version:%d distributor:%d storage:%d",
                                      currentClusterStateVersion, nodeCount, nodeCount));
}
return clusterState(String.format("version:%d distributor:" + nodeCount + " storage:" + nodeCount, currentClusterStateVersion));
/**
 * Cluster state at {@code currentClusterStateVersion} with {@code nodeCount}
 * distributor nodes and {@code nodeCount} storage nodes, all up.
 */
private static ClusterState defaultAllUpClusterState(int nodeCount) {
    String allUp = String.format("version:%d distributor:%d storage:%d",
                                 currentClusterStateVersion, nodeCount, nodeCount);
    return clusterState(allUp);
}
class NodeStateChangeCheckerTest { private static final int requiredRedundancy = 4; private static final int currentClusterStateVersion = 2; private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1); private static final Node nodeStorage = new Node(STORAGE, 1); private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP); private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(MAINTENANCE, "Orchestrator"); private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer"); private static NodeState createNodeState(State state, String description) { return new NodeState(STORAGE, state).setDescription(description); } private static ClusterState clusterState(String state) { try { return new ClusterState(state); } catch (ParseException e) { throw new RuntimeException(e); } } private static ClusterState defaultAllUpClusterState() { return defaultAllUpClusterState(4); } private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) { return new NodeStateChangeChecker(cluster, false); } private ContentCluster createCluster(int nodeCount) { return createCluster(nodeCount, 1); } private ContentCluster createCluster(int nodeCount, int groupCount) { Collection<ConfiguredNode> nodes = createNodes(nodeCount); Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount)); return new ContentCluster("Clustername", nodes, distribution); } private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) { return "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + replicationfactor1 + "\n" + " },\n" + " {\n" + " \"node-index\": 1,\n" + " \"min-current-replication-factor\": " + replicationfactor2 + "\n" + " },\n" + " {\n" + " \"node-index\": 2,\n" + " \"min-current-replication-factor\": " + replicationfactor3 
+ "\n" + " },\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]\n" + " }\n" + "}\n"; } private void markAllNodesAsReportingStateUp(ContentCluster cluster) { final ClusterInfo clusterInfo = cluster.clusterInfo(); final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size(); for (int i = 0; i < configuredNodeCount; i++) { clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0); } } @Test void testCanUpgradeForce() { var nodeStateChangeChecker = createChangeChecker(createCluster(1)); NodeState newState = new NodeState(STORAGE, INITIALIZING); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), FORCE, UP_NODE_STATE, newState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDeniedInMoratorium() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason()); } @Test void testUnknownStorageNode() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Unknown node storage.10", result.getReason()); } @Test 
void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); setStorageNodeWantedStateToMaintenance(cluster, 0); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testMaintenanceAllowedFor2Of4Groups() { Collection<ConfiguredNode> nodes = createNodes(4); StorDistributionConfig config = createDistributionConfig(4, 4); int maxNumberOfGroupsAllowedToBeDown = 2; var cluster = new ContentCluster("Clustername", nodes, new Distribution(config), maxNumberOfGroupsAllowedToBeDown); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); var nodeStateChangeChecker = createChangeChecker(cluster); { int nodeIndex = 0; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, defaultAllUpClusterState()); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:4 .0.s:d storage:4 .0.s:m", currentClusterStateVersion)); int nodeIndex = 1; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:4 storage:4 .0.s:m .1.s:m", currentClusterStateVersion)); int nodeIndex = 2; Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, 
MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.toString()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Nodes in 2 groups are already down, cannot take down another node", result.getReason()); } } @Test void testMaintenanceAllowedFor2Of4Groups8Nodes() { Collection<ConfiguredNode> nodes = createNodes(8); StorDistributionConfig config = createDistributionConfig(8, 4); int maxNumberOfGroupsAllowedToBeDown = 2; var cluster = new ContentCluster("Clustername", nodes, new Distribution(config), maxNumberOfGroupsAllowedToBeDown); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); var nodeStateChangeChecker = createChangeChecker(cluster); { ClusterState clusterState = defaultAllUpClusterState(8); int nodeIndex = 0; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 .0.s:d storage:8 .0.s:m", currentClusterStateVersion)); int nodeIndex = 1; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m", currentClusterStateVersion)); int nodeIndex = 2; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .2.s:m", currentClusterStateVersion)); int nodeIndex = 4; Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.toString()); 
assertFalse(result.wantedStateAlreadySet()); assertEquals("Nodes in 2 groups are already down, cannot take down another node", result.getReason()); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .2.s:m", currentClusterStateVersion)); int nodeIndex = 3; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } } @Test void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); setDistributorNodeWantedState(cluster, 0, new NodeState(DISTRIBUTOR, DOWN), "Orchestrator"); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown() { ContentCluster cluster = createCluster(4, 2); setDistributorNodeWantedState(cluster, 0, new NodeState(DISTRIBUTOR, DOWN), "Orchestrator"); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other distributor 
0 in group 0 has wanted state Down", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.getReason()); assertEquals("Another distributor wants state DOWN: 0", result.getReason()); } } @Test void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended() { ContentCluster cluster = createCluster(4, 2); setStorageNodeWantedState(cluster, 0, new NodeState(STORAGE, MAINTENANCE), "Orchestrator"); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), result.getReason()); assertFalse(result.wantedStateAlreadySet()); } } @Test void testSafeSetStateDistributors() { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1)); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes")); } @Test void 
testCanUpgradeSafeMissingStorage() { ContentCluster cluster = createCluster(4); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, DOWN), 0); ClusterState clusterStateWith3Down = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterStateWith3Down, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Another storage node has state DOWN: 3", result.getReason()); } @Test void testCanUpgradeStorageSafeYes() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpFailsIfReportedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .%d.s:d", currentClusterStateVersion, nodeStorage.getIndex())); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); 
assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanSetUpEvenIfOldWantedStateIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, new NodeState(STORAGE, DOWN), UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeStorageSafeNo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4", result.getReason()); } @Test void testCanUpgradeIfMissingMinReplicationFactor() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeIfStorageNodeMissingFromNodeInfo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); String hostInfo = "{\n" + " \"cluster-state-version\": 2,\n" + 
" \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + requiredRedundancy + "\n" + " }\n" + " ]\n" + " }\n" + "}\n"; setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testMissingDistributorState() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason()); } private Result transitionToSameState(State state, String oldDescription, String newDescription) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); NodeState currentNodeState = createNodeState(state, oldDescription); NodeState newNodeState = createNodeState(state, newDescription); return nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, currentNodeState, newNodeState); } private Result transitionToSameState(String oldDescription, String newDescription) { return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription); } @Test void testSettingUpWhenUpCausesAlreadySet() { Result result = transitionToSameState(UP, "foo", "bar"); assertTrue(result.wantedStateAlreadySet()); } @Test void testSettingAlreadySetState() { Result result = 
transitionToSameState("foo", "foo"); assertFalse(result.settingWantedStateIsAllowed()); assertTrue(result.wantedStateAlreadySet()); } @Test void testDifferentDescriptionImpliesDenied() { Result result = transitionToSameState("foo", "bar"); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, UP), 0); } return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); } private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) { for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } } private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) { return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); } @Test void testCanUpgradeWhenAllUp() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenAllUpOrRetired() 
{ Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenStorageIsDown() { ClusterState clusterState = defaultAllUpClusterState(); var storageNodeIndex = nodeStorage.getIndex(); ContentCluster cluster = createCluster(4); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCannotUpgradeWhenOtherStorageIsDown() { int otherIndex = 2; assertNotEquals(nodeStorage.getIndex(), otherIndex); ContentCluster cluster = createCluster(4); ClusterState clusterState = defaultAllUpClusterState(); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Another storage node has state DOWN: 2")); } @Test void testNodeRatioRequirementConsidersGeneratedNodeStates() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, 
stateWithNodeDown, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDownDisallowedByNonRetiredState() { Result result = evaluateDownTransition( defaultAllUpClusterState(), UP, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason()); } @Test void testDownDisallowedByBuckets() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("The storage node manages 1 buckets", result.getReason()); } @Test void testDownDisallowedByReportedState() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason()); } @Test void testDownDisallowedByVersionMismatch() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1", result.getReason()); } @Test void testAllowedToSetDown() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result evaluateDownTransition(ClusterState clusterState, State reportedState, int hostInfoClusterStateVersion, int lastAlldisksBuckets) { ContentCluster cluster 
= createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex()); nodeInfo.setReportedState(new NodeState(STORAGE, reportedState), 0); nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets)); return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE); } private ClusterState retiredClusterStateSuffix() { return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r", currentClusterStateVersion, nodeStorage.getIndex())); } private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) { return HostInfo.createHostInfo(String.format("{\n" + " \"metrics\":\n" + " {\n" + " \"snapshot\":\n" + " {\n" + " \"from\":1494940706,\n" + " \"to\":1494940766\n" + " },\n" + " \"values\":\n" + " [\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.buckets\",\n" + " \"description\":\"buckets managed\",\n" + " \"values\":\n" + " {\n" + " \"average\":262144.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":262144,\n" + " \"max\":262144,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.docs\",\n" + " \"description\":\"documents stored\",\n" + " \"values\":\n" + " {\n" + " \"average\":154689587.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":154689587,\n" + " \"max\":154689587,\n" + " \"last\":154689587\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":0.0,\n" + " \"sum\":0.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":0,\n" + " \"max\":0,\n" + " \"last\":0\n" + " 
},\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"global\"\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":129.0,\n" + " \"sum\":129.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":129,\n" + " \"max\":129,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"default\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"cluster-state-version\":%d\n" + "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion)); } private List<ConfiguredNode> createNodes(int count) { List<ConfiguredNode> nodes = new ArrayList<>(); for (int i = 0; i < count; i++) nodes.add(new ConfiguredNode(i, false)); return nodes; } private StorDistributionConfig createDistributionConfig(int nodes) { var configBuilder = new StorDistributionConfig.Builder() .ready_copies(requiredRedundancy) .redundancy(requiredRedundancy) .initial_redundancy(requiredRedundancy); var groupBuilder = new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes); int nodeIndex = 0; for (int j = 0; j < nodes; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); return configBuilder.build(); } private StorDistributionConfig createDistributionConfig(int nodes, int groups) { if (groups == 1) return createDistributionConfig(nodes); if (nodes % groups != 0) throw new IllegalArgumentException("Cannot have " + groups + " groups with an odd number of nodes: " + nodes); int nodesPerGroup = nodes / groups; var configBuilder = new StorDistributionConfig.Builder() .active_per_leaf_group(true) .ready_copies(groups) .redundancy(groups) .initial_redundancy(groups); configBuilder.group(new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") 
.capacity(nodes) .partitions("1|*")); int nodeIndex = 0; for (int i = 0; i < groups; ++i) { var groupBuilder = new StorDistributionConfig.Group.Builder() .index(String.valueOf(i)) .name(String.valueOf(i)) .capacity(nodesPerGroup) .partitions(""); for (int j = 0; j < nodesPerGroup; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); } return configBuilder.build(); } private void checkSettingToMaintenanceIsAllowed(int nodeIndex, NodeStateChangeChecker nodeStateChangeChecker, ClusterState clusterState) { Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Preconditions fulfilled and new state different", result.getReason()); } private void setStorageNodeWantedStateToMaintenance(ContentCluster cluster, int nodeIndex) { setStorageNodeWantedStateToMaintenance(cluster, nodeIndex, "Orchestrator"); } private void setStorageNodeWantedStateToMaintenance(ContentCluster cluster, int nodeIndex, String description) { cluster.clusterInfo().getStorageNodeInfo(nodeIndex).setWantedState(MAINTENANCE_NODE_STATE.setDescription(description)); } private void setStorageNodeWantedState(ContentCluster cluster, int nodeIndex, NodeState state, String description) { cluster.clusterInfo().getStorageNodeInfo(nodeIndex).setWantedState(state.setDescription(description)); } private void setDistributorNodeWantedState(ContentCluster cluster, int nodeIndex, NodeState state, String description) { cluster.clusterInfo().getDistributorNodeInfo(nodeIndex).setWantedState(state.setDescription(description)); } }
class NodeStateChangeCheckerTest { private static final int requiredRedundancy = 4; private static final int currentClusterStateVersion = 2; private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1); private static final Node nodeStorage = new Node(STORAGE, 1); private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP); private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(MAINTENANCE, "Orchestrator"); private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer"); private static NodeState createNodeState(State state, String description) { return new NodeState(STORAGE, state).setDescription(description); } private static ClusterState clusterState(String state) { return ClusterState.stateFromString(state); } private static ClusterState defaultAllUpClusterState() { return defaultAllUpClusterState(4); } private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) { return new NodeStateChangeChecker(cluster, false); } private ContentCluster createCluster(int nodeCount, int maxNumberOfGroupsAllowedToBeDown) { return createCluster(nodeCount, 1, maxNumberOfGroupsAllowedToBeDown); } private ContentCluster createCluster(int nodeCount, int groupCount, int maxNumberOfGroupsAllowedToBeDown) { List<ConfiguredNode> nodes = createNodes(nodeCount); Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount)); return new ContentCluster("Clustername", nodes, distribution, maxNumberOfGroupsAllowedToBeDown); } private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) { return "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + replicationfactor1 + "\n" + " },\n" + " {\n" + " \"node-index\": 1,\n" + " \"min-current-replication-factor\": " + replicationfactor2 + "\n" + " },\n" + " {\n" + " 
\"node-index\": 2,\n" + " \"min-current-replication-factor\": " + replicationfactor3 + "\n" + " },\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]\n" + " }\n" + "}\n"; } private void markAllNodesAsReportingStateUp(ContentCluster cluster) { final ClusterInfo clusterInfo = cluster.clusterInfo(); final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size(); for (int i = 0; i < configuredNodeCount; i++) { clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0); } } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanUpgradeWithForce(int maxNumberOfGroupsAllowedToBeDown) { var nodeStateChangeChecker = createChangeChecker(createCluster(1, maxNumberOfGroupsAllowedToBeDown)); NodeState newState = new NodeState(STORAGE, INITIALIZING); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), FORCE, UP_NODE_STATE, newState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testDeniedInMoratorium(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testUnknownStorageNode(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = 
createCluster(4, maxNumberOfGroupsAllowedToBeDown); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Unknown node storage.10", result.getReason()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); setStorageNodeWantedStateToMaintenance(cluster, 0); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testMaintenanceAllowedFor2Of4Groups() { Collection<ConfiguredNode> nodes = createNodes(4); StorDistributionConfig config = createDistributionConfig(4, 4); int maxNumberOfGroupsAllowedToBeDown = 2; var cluster = new ContentCluster("Clustername", nodes, new Distribution(config), maxNumberOfGroupsAllowedToBeDown); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); var nodeStateChangeChecker = createChangeChecker(cluster); { int nodeIndex = 0; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, defaultAllUpClusterState()); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d 
distributor:4 .0.s:d storage:4 .0.s:m", currentClusterStateVersion)); int nodeIndex = 1; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:4 storage:4 .0.s:m .1.s:m .2.s:d", currentClusterStateVersion)); int nodeIndex = 2; cluster.clusterInfo().getStorageNodeInfo(nodeIndex).setReportedState(new NodeState(STORAGE, DOWN), 0); Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.toString()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most 2 groups can have wanted state: [0, 1, 2]", result.getReason()); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:4 storage:4 .0.s:m .1.s:m", currentClusterStateVersion)); int nodeIndex = 2; Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.toString()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most 2 groups can have wanted state: [0, 1]", result.getReason()); } } @Test void testMaintenanceAllowedFor2Of4Groups8Nodes() { Collection<ConfiguredNode> nodes = createNodes(8); StorDistributionConfig config = createDistributionConfig(8, 4); int maxNumberOfGroupsAllowedToBeDown = 2; var cluster = new ContentCluster("Clustername", nodes, new Distribution(config), maxNumberOfGroupsAllowedToBeDown); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); var nodeStateChangeChecker = createChangeChecker(cluster); { ClusterState clusterState = defaultAllUpClusterState(8); int nodeIndex = 0; checkSettingToMaintenanceIsAllowed(nodeIndex, 
nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 .0.s:d storage:8 .0.s:m", currentClusterStateVersion)); int nodeIndex = 1; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m", currentClusterStateVersion)); int nodeIndex = 2; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .2.s:m", currentClusterStateVersion)); int nodeIndex = 4; Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.toString()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most 2 groups can have wanted state: [0, 1]", result.getReason()); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .2.s:m", currentClusterStateVersion)); int nodeIndex = 3; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .3.s:m", currentClusterStateVersion)); setStorageNodeWantedState(cluster, 3, MAINTENANCE, "Maintenance, set by operator"); setStorageNodeWantedState(cluster, 2, UP, ""); int nodeIndex = 2; Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, 
MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), result.toString()); assertFalse(result.wantedStateAlreadySet()); } } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); setDistributorNodeWantedState(cluster, 0, DOWN, "Orchestrator"); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, 2, maxNumberOfGroupsAllowedToBeDown); setDistributorNodeWantedState(cluster, 0, DOWN, "Orchestrator"); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); if (maxNumberOfGroupsAllowedToBeDown >= 1) assertEquals("Wanted state already set for another node in groups: [0]", result.getReason()); else assertEquals("At most one group can have wanted state: Other distributor 0 in group 
0 has wanted state Down", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); if (maxNumberOfGroupsAllowedToBeDown >= 1) { assertFalse(result.settingWantedStateIsAllowed(), result.getReason()); assertEquals("Wanted state already set for another node in groups: [0]", result.getReason()); } else { assertFalse(result.settingWantedStateIsAllowed(), result.getReason()); assertEquals("Another distributor wants state DOWN: 0", result.getReason()); } } } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, 2, maxNumberOfGroupsAllowedToBeDown); setStorageNodeWantedState(cluster, 0, MAINTENANCE, "Orchestrator"); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); if (maxNumberOfGroupsAllowedToBeDown >= 1) assertEquals("At most 1 groups can have wanted state: [0]", result.getReason()); else assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), result.getReason()); assertFalse(result.wantedStateAlreadySet()); } } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testSafeSetStateDistributors(int 
maxNumberOfGroupsAllowedToBeDown) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1, 1, maxNumberOfGroupsAllowedToBeDown)); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes")); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanUpgradeSafeMissingStorage(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, DOWN), 0); ClusterState clusterStateWith3Down = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterStateWith3Down, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Another storage node has state DOWN: 3", result.getReason()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanUpgradeStorageSafeYes(int maxNumberOfGroupsAllowedToBeDown) { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4, 1, maxNumberOfGroupsAllowedToBeDown), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testSetUpFailsIfReportedIsDown(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker 
nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .%d.s:d", currentClusterStateVersion, nodeStorage.getIndex())); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanSetUpEvenIfOldWantedStateIsDown(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, new NodeState(STORAGE, DOWN), UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanUpgradeStorageSafeNo(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, 
HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4", result.getReason()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanUpgradeIfMissingMinReplicationFactor(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanUpgradeIfStorageNodeMissingFromNodeInfo(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); String hostInfo = "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + requiredRedundancy + "\n" + " }\n" + " ]\n" + " }\n" + "}\n"; setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void 
testMissingDistributorState(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason()); } private Result transitionToSameState(State state, String oldDescription, String newDescription, int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); NodeState currentNodeState = createNodeState(state, oldDescription); NodeState newNodeState = createNodeState(state, newDescription); return nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, currentNodeState, newNodeState); } private Result transitionToSameState(String oldDescription, String newDescription, int maxNumberOfGroupsAllowedToBeDown) { return transitionToSameState(MAINTENANCE, oldDescription, newDescription, maxNumberOfGroupsAllowedToBeDown); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testSettingUpWhenUpCausesAlreadySet(int maxNumberOfGroupsAllowedToBeDown) { Result result = transitionToSameState(UP, "foo", "bar", maxNumberOfGroupsAllowedToBeDown); assertTrue(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testSettingAlreadySetState(int maxNumberOfGroupsAllowedToBeDown) { Result result = transitionToSameState("foo", "foo", maxNumberOfGroupsAllowedToBeDown); assertFalse(result.settingWantedStateIsAllowed()); 
assertTrue(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testDifferentDescriptionImpliesDenied(int maxNumberOfGroupsAllowedToBeDown) { Result result = transitionToSameState("foo", "bar", maxNumberOfGroupsAllowedToBeDown); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, UP), 0); } return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); } private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) { for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } } private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) { return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanUpgradeWhenAllUp(int maxNumberOfGroupsAllowedToBeDown) { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4, maxNumberOfGroupsAllowedToBeDown), defaultAllUpClusterState()); 
assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanUpgradeWhenAllUpOrRetired(int maxNumberOfGroupsAllowedToBeDown) { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4, maxNumberOfGroupsAllowedToBeDown), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCanUpgradeWhenStorageIsDown(int maxNumberOfGroupsAllowedToBeDown) { ClusterState clusterState = defaultAllUpClusterState(); var storageNodeIndex = nodeStorage.getIndex(); ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testCannotUpgradeWhenOtherStorageIsDown(int maxNumberOfGroupsAllowedToBeDown) { int otherIndex = 2; assertNotEquals(nodeStorage.getIndex(), otherIndex); ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); ClusterState clusterState = defaultAllUpClusterState(); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Another storage 
node has state DOWN: 2")); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testNodeRatioRequirementConsidersGeneratedNodeStates(int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testDownDisallowedByNonRetiredState(int maxNumberOfGroupsAllowedToBeDown) { Result result = evaluateDownTransition( defaultAllUpClusterState(), UP, currentClusterStateVersion, 0, maxNumberOfGroupsAllowedToBeDown); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testDownDisallowedByBuckets(int maxNumberOfGroupsAllowedToBeDown) { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1, maxNumberOfGroupsAllowedToBeDown); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("The storage node manages 1 buckets", result.getReason()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testDownDisallowedByReportedState(int maxNumberOfGroupsAllowedToBeDown) { Result result = evaluateDownTransition( retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0, maxNumberOfGroupsAllowedToBeDown); assertFalse(result.settingWantedStateIsAllowed()); 
assertFalse(result.wantedStateAlreadySet()); assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testDownDisallowedByVersionMismatch(int maxNumberOfGroupsAllowedToBeDown) { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0, maxNumberOfGroupsAllowedToBeDown); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1", result.getReason()); } @ParameterizedTest @ValueSource(ints = {-1, 1}) void testAllowedToSetDown(int maxNumberOfGroupsAllowedToBeDown) { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0, maxNumberOfGroupsAllowedToBeDown); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result evaluateDownTransition(ClusterState clusterState, State reportedState, int hostInfoClusterStateVersion, int lastAlldisksBuckets, int maxNumberOfGroupsAllowedToBeDown) { ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex()); nodeInfo.setReportedState(new NodeState(STORAGE, reportedState), 0); nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets)); return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE); } private ClusterState retiredClusterStateSuffix() { return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r", currentClusterStateVersion, nodeStorage.getIndex())); } private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int 
lastAlldisksBuckets) { return HostInfo.createHostInfo(String.format("{\n" + " \"metrics\":\n" + " {\n" + " \"snapshot\":\n" + " {\n" + " \"from\":1494940706,\n" + " \"to\":1494940766\n" + " },\n" + " \"values\":\n" + " [\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.buckets\",\n" + " \"description\":\"buckets managed\",\n" + " \"values\":\n" + " {\n" + " \"average\":262144.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":262144,\n" + " \"max\":262144,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.docs\",\n" + " \"description\":\"documents stored\",\n" + " \"values\":\n" + " {\n" + " \"average\":154689587.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":154689587,\n" + " \"max\":154689587,\n" + " \"last\":154689587\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":0.0,\n" + " \"sum\":0.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":0,\n" + " \"max\":0,\n" + " \"last\":0\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"global\"\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":129.0,\n" + " \"sum\":129.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":129,\n" + " \"max\":129,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"default\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"cluster-state-version\":%d\n" + "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion)); } private List<ConfiguredNode> createNodes(int count) { List<ConfiguredNode> nodes 
= new ArrayList<>(); for (int i = 0; i < count; i++) nodes.add(new ConfiguredNode(i, false)); return nodes; } private StorDistributionConfig createDistributionConfig(int nodes) { var configBuilder = new StorDistributionConfig.Builder() .ready_copies(requiredRedundancy) .redundancy(requiredRedundancy) .initial_redundancy(requiredRedundancy); var groupBuilder = new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes); int nodeIndex = 0; for (int j = 0; j < nodes; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); return configBuilder.build(); } private StorDistributionConfig createDistributionConfig(int nodes, int groups) { if (groups == 1) return createDistributionConfig(nodes); if (nodes % groups != 0) throw new IllegalArgumentException("Cannot have " + groups + " groups with an odd number of nodes: " + nodes); int nodesPerGroup = nodes / groups; var configBuilder = new StorDistributionConfig.Builder() .active_per_leaf_group(true) .ready_copies(groups) .redundancy(groups) .initial_redundancy(groups); configBuilder.group(new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes) .partitions("1|*")); int nodeIndex = 0; for (int i = 0; i < groups; ++i) { var groupBuilder = new StorDistributionConfig.Group.Builder() .index(String.valueOf(i)) .name(String.valueOf(i)) .capacity(nodesPerGroup) .partitions(""); for (int j = 0; j < nodesPerGroup; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); } return configBuilder.build(); } private void checkSettingToMaintenanceIsAllowed(int nodeIndex, NodeStateChangeChecker nodeStateChangeChecker, ClusterState clusterState) { Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, 
MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), result.toString()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Preconditions fulfilled and new state different", result.getReason()); } private void setStorageNodeWantedStateToMaintenance(ContentCluster cluster, int nodeIndex) { setStorageNodeWantedState(cluster, nodeIndex, MAINTENANCE, "Orchestrator"); } private void setStorageNodeWantedState(ContentCluster cluster, int nodeIndex, State state, String description) { NodeState nodeState = new NodeState(STORAGE, state); cluster.clusterInfo().getStorageNodeInfo(nodeIndex).setWantedState(nodeState.setDescription(description)); } private void setDistributorNodeWantedState(ContentCluster cluster, int nodeIndex, State state, String description) { NodeState nodeState = new NodeState(DISTRIBUTOR, state); cluster.clusterInfo().getDistributorNodeInfo(nodeIndex).setWantedState(nodeState.setDescription(description)); } }
🙄 , thanks!
protected double maintain() { Exception lastException = null; int attempts = 0; int failures = 0; var metrics = collectClusterMetrics(); for (var application : applications.asList()) { for (var instance : application.instances().values()) { for (var deployment : instance.productionDeployments().values()) { if (shuttingDown()) return 0.0; try { attempts++; var bcpGroups = BcpGroup.groupsFrom(instance, application.deploymentSpec()); var patch = new ApplicationPatch(); addTrafficShare(deployment, bcpGroups, patch); addBcpGroupInfo(deployment.zone().region(), metrics.get(instance.id()), bcpGroups, patch); nodeRepository.patchApplication(deployment.zone(), instance.id(), patch); } catch (Exception e) { failures++; lastException = e; } } } } double successFactorDeviation = asSuccessFactorDeviation(attempts, failures); if ( successFactorDeviation == 1.0 ) log.log(Level.WARNING, "Could not update traffic share on any applications", lastException); else if ( successFactorDeviation > 0.1 ) log.log(Level.FINE, "Could not update traffic share on all applications", lastException); return successFactorDeviation; }
log.log(Level.FINE, "Could not update traffic share on all applications", lastException);
protected double maintain() { Exception lastException = null; int attempts = 0; int failures = 0; var metrics = collectClusterMetrics(); for (var application : applications.asList()) { for (var instance : application.instances().values()) { for (var deployment : instance.productionDeployments().values()) { if (shuttingDown()) return 0.0; try { attempts++; var bcpGroups = BcpGroup.groupsFrom(instance, application.deploymentSpec()); var patch = new ApplicationPatch(); addTrafficShare(deployment, bcpGroups, patch); addBcpGroupInfo(deployment.zone().region(), metrics.get(instance.id()), bcpGroups, patch); nodeRepository.patchApplication(deployment.zone(), instance.id(), patch); } catch (Exception e) { failures++; lastException = e; } } } } double successFactorDeviation = asSuccessFactorDeviation(attempts, failures); if ( successFactorDeviation == -successFactorBaseline ) log.log(Level.WARNING, "Could not update traffic share on any applications", lastException); else if ( successFactorDeviation < -0.1 ) log.log(Level.FINE, "Could not update traffic share on all applications", lastException); return successFactorDeviation; }
class BcpGroupUpdater extends ControllerMaintainer { private final ApplicationController applications; private final NodeRepository nodeRepository; public BcpGroupUpdater(Controller controller, Duration duration, Double successFactorBaseline) { super(controller, duration, successFactorBaseline); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override /** Adds deployment traffic share to the given patch. */ private void addTrafficShare(Deployment deployment, List<BcpGroup> bcpGroups, ApplicationPatch patch) { double currentReadShare = 0; double maxReadShare = 0; for (BcpGroup group : bcpGroups) { if ( ! group.contains(deployment.zone().region())) continue; double deploymentQps = deployment.metrics().queriesPerSecond(); double groupQps = group.totalQps(); double fraction = group.fraction(deployment.zone().region()); currentReadShare += groupQps == 0 ? 0 : fraction * deploymentQps / groupQps; maxReadShare += group.size() == 1 ? currentReadShare : fraction * ( deploymentQps + group.maxQpsExcluding(deployment.zone().region()) / (group.size() - 1) ) / groupQps; } patch.currentReadShare = currentReadShare; patch.maxReadShare = maxReadShare; } private Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> collectClusterMetrics() { Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> metrics = new HashMap<>(); for (var deploymentEntry : new HashMap<>(controller().applications().deploymentInfo()).entrySet()) { if ( ! 
deploymentEntry.getKey().zoneId().environment().isProduction()) continue; var appEntry = metrics.computeIfAbsent(deploymentEntry.getKey().applicationId(), __ -> new HashMap<>()); for (var clusterEntry : deploymentEntry.getValue().clusters().entrySet()) { var clusterMetrics = appEntry.computeIfAbsent(clusterEntry.getKey(), __ -> new ClusterDeploymentMetrics()); clusterMetrics.put(deploymentEntry.getKey().zoneId().region(), new DeploymentMetrics(clusterEntry.getValue().target().metrics().queryRate(), clusterEntry.getValue().target().metrics().growthRateHeadroom(), clusterEntry.getValue().target().metrics().cpuCostPerQuery())); } } return metrics; } /** Adds bcp group info to the given patch, for any clusters where we have information. */ private void addBcpGroupInfo(RegionName regionToUpdate, Map<ClusterSpec.Id, ClusterDeploymentMetrics> metrics, List<BcpGroup> bcpGroups, ApplicationPatch patch) { if (metrics == null) return; for (var clusterEntry : metrics.entrySet()) { addClusterBcpGroupInfo(clusterEntry.getKey(), clusterEntry.getValue(), regionToUpdate, bcpGroups, patch); } } private void addClusterBcpGroupInfo(ClusterSpec.Id id, ClusterDeploymentMetrics metrics, RegionName regionToUpdate, List<BcpGroup> bcpGroups, ApplicationPatch patch) { var weightedSumOfMaxMetrics = DeploymentMetrics.empty(); double sumOfCompleteMemberships = 0; for (BcpGroup bcpGroup : bcpGroups) { if ( ! bcpGroup.contains(regionToUpdate)) continue; var groupMetrics = metrics.subsetOf(bcpGroup); if ( ! 
groupMetrics.isCompleteExcluding(regionToUpdate, bcpGroup)) continue; var max = groupMetrics.maxQueryRateExcluding(regionToUpdate, bcpGroup); if (max.isEmpty()) continue; weightedSumOfMaxMetrics = weightedSumOfMaxMetrics.add(max.get().multipliedBy(bcpGroup.fraction(regionToUpdate))); sumOfCompleteMemberships += bcpGroup.fraction(regionToUpdate); } if (sumOfCompleteMemberships > 0) patch.clusters.put(id.value(), weightedSumOfMaxMetrics.dividedBy(sumOfCompleteMemberships).asClusterPatch()); } /** * A set of regions which will take over traffic from each other if one of them fails. * Each region will take an equal share (modulated by fraction) of the failing region's traffic. * * A regions membership in a group may be partial, represented by a fraction [0, 1], * in which case the other regions will collectively only take that fraction of the failing regions traffic, * and symmetrically, the region will only take its fraction of its share of traffic of any other failing region. */ private static class BcpGroup { /** The instance which has this group. */ private final Instance instance; /** Regions in this group, with their fractions. */ private final Map<RegionName, Double> regions; /** Creates a group of a subset of the deployments in this instance. */ private BcpGroup(Instance instance, Map<RegionName, Double> regions) { this.instance = instance; this.regions = regions; } /** Returns the sum of the fractional memberships of this. */ double size() { return regions.values().stream().mapToDouble(f -> f).sum(); } Set<RegionName> regions() { return regions.keySet(); } double fraction(RegionName region) { return regions.getOrDefault(region, 0.0); } boolean contains(RegionName region) { return regions.containsKey(region); } double totalQps() { return instance.productionDeployments().values().stream() .mapToDouble(i -> i.metrics().queriesPerSecond()).sum(); } double maxQpsExcluding(RegionName region) { return instance.productionDeployments().values().stream() .filter(d -> ! 
d.zone().region().equals(region)) .mapToDouble(d -> d.metrics().queriesPerSecond() * fraction(d.zone().region())) .max() .orElse(0); } private static Bcp bcpOf(InstanceName instanceName, DeploymentSpec deploymentSpec) { var instanceSpec = deploymentSpec.instance(instanceName); if (instanceSpec.isEmpty()) return deploymentSpec.bcp(); return instanceSpec.get().bcp().orElse(deploymentSpec.bcp()); } private static Map<RegionName, Double> regionsFrom(Instance instance) { return instance.productionDeployments().values().stream() .collect(Collectors.toMap(deployment -> deployment.zone().region(), __ -> 1.0)); } private static Map<RegionName, Double> regionsFrom(Bcp.Group groupSpec) { return groupSpec.members().stream() .collect(Collectors.toMap(member -> member.region(), member -> member.fraction())); } static List<BcpGroup> groupsFrom(Instance instance, DeploymentSpec deploymentSpec) { Bcp bcp = bcpOf(instance.name(), deploymentSpec); if (bcp.isEmpty()) return List.of(new BcpGroup(instance, regionsFrom(instance))); return bcp.groups().stream().map(groupSpec -> new BcpGroup(instance, regionsFrom(groupSpec))).toList(); } } record ApplicationClusterKey(ApplicationId application, ClusterSpec.Id cluster) { } static class ClusterDeploymentMetrics { private final Map<RegionName, DeploymentMetrics> deploymentMetrics; public ClusterDeploymentMetrics() { this.deploymentMetrics = new ConcurrentHashMap<>(); } public ClusterDeploymentMetrics(Map<RegionName, DeploymentMetrics> deploymentMetrics) { this.deploymentMetrics = new ConcurrentHashMap<>(deploymentMetrics); } void put(RegionName region, DeploymentMetrics metrics) { deploymentMetrics.put(region, metrics); } ClusterDeploymentMetrics subsetOf(BcpGroup group) { Map<RegionName, DeploymentMetrics> filteredMetrics = new HashMap<>(); for (var entry : deploymentMetrics.entrySet()) { if (group.contains(entry.getKey())) filteredMetrics.put(entry.getKey(), entry.getValue()); } return new ClusterDeploymentMetrics(filteredMetrics); } /** 
Returns whether this has deployment metrics for each of the deployments in the given instance. */ boolean isCompleteExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup).allMatch(region -> deploymentMetrics.containsKey(region)); } /** Returns the metrics with the max query rate among the given instance, if any. */ Optional<DeploymentMetrics> maxQueryRateExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup) .map(region -> deploymentMetrics.get(region)) .max(Comparator.comparingDouble(m -> m.queryRate)); } private Stream<RegionName> regionsExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return bcpGroup.regions().stream() .filter(region -> ! region.equals(regionToExclude)); } } /** Metrics for a given application, cluster and deployment. */ record DeploymentMetrics(double queryRate, double growthRateHeadroom, double cpuCostPerQuery) { public ApplicationPatch.ClusterPatch asClusterPatch() { return new ApplicationPatch.ClusterPatch(new ApplicationPatch.BcpGroupInfo(queryRate, growthRateHeadroom, cpuCostPerQuery)); } DeploymentMetrics dividedBy(double d) { return new DeploymentMetrics(queryRate / d, growthRateHeadroom / d, cpuCostPerQuery / d); } DeploymentMetrics multipliedBy(double m) { return new DeploymentMetrics(queryRate * m, growthRateHeadroom * m, cpuCostPerQuery * m); } DeploymentMetrics add(DeploymentMetrics other) { return new DeploymentMetrics(queryRate + other.queryRate, growthRateHeadroom + other.growthRateHeadroom, cpuCostPerQuery + other.cpuCostPerQuery); } public static DeploymentMetrics empty() { return new DeploymentMetrics(0, 0, 0); } } }
class BcpGroupUpdater extends ControllerMaintainer { private final ApplicationController applications; private final NodeRepository nodeRepository; private final Double successFactorBaseline; public BcpGroupUpdater(Controller controller, Duration duration, Double successFactorBaseline) { super(controller, duration, successFactorBaseline); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); this.successFactorBaseline = successFactorBaseline; } @Override /** Adds deployment traffic share to the given patch. */ private void addTrafficShare(Deployment deployment, List<BcpGroup> bcpGroups, ApplicationPatch patch) { double currentReadShare = 0; double maxReadShare = 0; for (BcpGroup group : bcpGroups) { if ( ! group.contains(deployment.zone().region())) continue; double deploymentQps = deployment.metrics().queriesPerSecond(); double groupQps = group.totalQps(); double fraction = group.fraction(deployment.zone().region()); currentReadShare += groupQps == 0 ? 0 : fraction * deploymentQps / groupQps; maxReadShare += group.size() == 1 ? currentReadShare : fraction * ( deploymentQps + group.maxQpsExcluding(deployment.zone().region()) / (group.size() - 1) ) / groupQps; } patch.currentReadShare = currentReadShare; patch.maxReadShare = maxReadShare; } private Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> collectClusterMetrics() { Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> metrics = new HashMap<>(); for (var deploymentEntry : new HashMap<>(controller().applications().deploymentInfo()).entrySet()) { if ( ! 
deploymentEntry.getKey().zoneId().environment().isProduction()) continue; var appEntry = metrics.computeIfAbsent(deploymentEntry.getKey().applicationId(), __ -> new HashMap<>()); for (var clusterEntry : deploymentEntry.getValue().clusters().entrySet()) { var clusterMetrics = appEntry.computeIfAbsent(clusterEntry.getKey(), __ -> new ClusterDeploymentMetrics()); clusterMetrics.put(deploymentEntry.getKey().zoneId().region(), new DeploymentMetrics(clusterEntry.getValue().target().metrics().queryRate(), clusterEntry.getValue().target().metrics().growthRateHeadroom(), clusterEntry.getValue().target().metrics().cpuCostPerQuery())); } } return metrics; } /** Adds bcp group info to the given patch, for any clusters where we have information. */ private void addBcpGroupInfo(RegionName regionToUpdate, Map<ClusterSpec.Id, ClusterDeploymentMetrics> metrics, List<BcpGroup> bcpGroups, ApplicationPatch patch) { if (metrics == null) return; for (var clusterEntry : metrics.entrySet()) { addClusterBcpGroupInfo(clusterEntry.getKey(), clusterEntry.getValue(), regionToUpdate, bcpGroups, patch); } } private void addClusterBcpGroupInfo(ClusterSpec.Id id, ClusterDeploymentMetrics metrics, RegionName regionToUpdate, List<BcpGroup> bcpGroups, ApplicationPatch patch) { var weightedSumOfMaxMetrics = DeploymentMetrics.empty(); double sumOfCompleteMemberships = 0; for (BcpGroup bcpGroup : bcpGroups) { if ( ! bcpGroup.contains(regionToUpdate)) continue; var groupMetrics = metrics.subsetOf(bcpGroup); if ( ! 
groupMetrics.isCompleteExcluding(regionToUpdate, bcpGroup)) continue; var max = groupMetrics.maxQueryRateExcluding(regionToUpdate, bcpGroup); if (max.isEmpty()) continue; weightedSumOfMaxMetrics = weightedSumOfMaxMetrics.add(max.get().multipliedBy(bcpGroup.fraction(regionToUpdate))); sumOfCompleteMemberships += bcpGroup.fraction(regionToUpdate); } if (sumOfCompleteMemberships > 0) patch.clusters.put(id.value(), weightedSumOfMaxMetrics.dividedBy(sumOfCompleteMemberships).asClusterPatch()); } /** * A set of regions which will take over traffic from each other if one of them fails. * Each region will take an equal share (modulated by fraction) of the failing region's traffic. * * A regions membership in a group may be partial, represented by a fraction [0, 1], * in which case the other regions will collectively only take that fraction of the failing regions traffic, * and symmetrically, the region will only take its fraction of its share of traffic of any other failing region. */ private static class BcpGroup { /** The instance which has this group. */ private final Instance instance; /** Regions in this group, with their fractions. */ private final Map<RegionName, Double> regions; /** Creates a group of a subset of the deployments in this instance. */ private BcpGroup(Instance instance, Map<RegionName, Double> regions) { this.instance = instance; this.regions = regions; } /** Returns the sum of the fractional memberships of this. */ double size() { return regions.values().stream().mapToDouble(f -> f).sum(); } Set<RegionName> regions() { return regions.keySet(); } double fraction(RegionName region) { return regions.getOrDefault(region, 0.0); } boolean contains(RegionName region) { return regions.containsKey(region); } double totalQps() { return instance.productionDeployments().values().stream() .mapToDouble(i -> i.metrics().queriesPerSecond()).sum(); } double maxQpsExcluding(RegionName region) { return instance.productionDeployments().values().stream() .filter(d -> ! 
d.zone().region().equals(region)) .mapToDouble(d -> d.metrics().queriesPerSecond() * fraction(d.zone().region())) .max() .orElse(0); } private static Bcp bcpOf(InstanceName instanceName, DeploymentSpec deploymentSpec) { var instanceSpec = deploymentSpec.instance(instanceName); if (instanceSpec.isEmpty()) return deploymentSpec.bcp(); return instanceSpec.get().bcp().orElse(deploymentSpec.bcp()); } private static Map<RegionName, Double> regionsFrom(Instance instance) { return instance.productionDeployments().values().stream() .collect(Collectors.toMap(deployment -> deployment.zone().region(), __ -> 1.0)); } private static Map<RegionName, Double> regionsFrom(Bcp.Group groupSpec) { return groupSpec.members().stream() .collect(Collectors.toMap(member -> member.region(), member -> member.fraction())); } static List<BcpGroup> groupsFrom(Instance instance, DeploymentSpec deploymentSpec) { Bcp bcp = bcpOf(instance.name(), deploymentSpec); if (bcp.isEmpty()) return List.of(new BcpGroup(instance, regionsFrom(instance))); return bcp.groups().stream().map(groupSpec -> new BcpGroup(instance, regionsFrom(groupSpec))).toList(); } } record ApplicationClusterKey(ApplicationId application, ClusterSpec.Id cluster) { } static class ClusterDeploymentMetrics { private final Map<RegionName, DeploymentMetrics> deploymentMetrics; public ClusterDeploymentMetrics() { this.deploymentMetrics = new ConcurrentHashMap<>(); } public ClusterDeploymentMetrics(Map<RegionName, DeploymentMetrics> deploymentMetrics) { this.deploymentMetrics = new ConcurrentHashMap<>(deploymentMetrics); } void put(RegionName region, DeploymentMetrics metrics) { deploymentMetrics.put(region, metrics); } ClusterDeploymentMetrics subsetOf(BcpGroup group) { Map<RegionName, DeploymentMetrics> filteredMetrics = new HashMap<>(); for (var entry : deploymentMetrics.entrySet()) { if (group.contains(entry.getKey())) filteredMetrics.put(entry.getKey(), entry.getValue()); } return new ClusterDeploymentMetrics(filteredMetrics); } /** 
Returns whether this has deployment metrics for each of the deployments in the given instance. */ boolean isCompleteExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup).allMatch(region -> deploymentMetrics.containsKey(region)); } /** Returns the metrics with the max query rate among the given instance, if any. */ Optional<DeploymentMetrics> maxQueryRateExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup) .map(region -> deploymentMetrics.get(region)) .max(Comparator.comparingDouble(m -> m.queryRate)); } private Stream<RegionName> regionsExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return bcpGroup.regions().stream() .filter(region -> ! region.equals(regionToExclude)); } } /** Metrics for a given application, cluster and deployment. */ record DeploymentMetrics(double queryRate, double growthRateHeadroom, double cpuCostPerQuery) { public ApplicationPatch.ClusterPatch asClusterPatch() { return new ApplicationPatch.ClusterPatch(new ApplicationPatch.BcpGroupInfo(queryRate, growthRateHeadroom, cpuCostPerQuery)); } DeploymentMetrics dividedBy(double d) { return new DeploymentMetrics(queryRate / d, growthRateHeadroom / d, cpuCostPerQuery / d); } DeploymentMetrics multipliedBy(double m) { return new DeploymentMetrics(queryRate * m, growthRateHeadroom * m, cpuCostPerQuery * m); } DeploymentMetrics add(DeploymentMetrics other) { return new DeploymentMetrics(queryRate + other.queryRate, growthRateHeadroom + other.growthRateHeadroom, cpuCostPerQuery + other.cpuCostPerQuery); } public static DeploymentMetrics empty() { return new DeploymentMetrics(0, 0, 0); } } }
This still applies. Baseline is set to .7, so deviation can never be -1
protected double maintain() { Exception lastException = null; int attempts = 0; int failures = 0; var metrics = collectClusterMetrics(); for (var application : applications.asList()) { for (var instance : application.instances().values()) { for (var deployment : instance.productionDeployments().values()) { if (shuttingDown()) return 0.0; try { attempts++; var bcpGroups = BcpGroup.groupsFrom(instance, application.deploymentSpec()); var patch = new ApplicationPatch(); addTrafficShare(deployment, bcpGroups, patch); addBcpGroupInfo(deployment.zone().region(), metrics.get(instance.id()), bcpGroups, patch); nodeRepository.patchApplication(deployment.zone(), instance.id(), patch); } catch (Exception e) { failures++; lastException = e; } } } } double successFactorDeviation = asSuccessFactorDeviation(attempts, failures); if ( successFactorDeviation == 1.0 ) log.log(Level.WARNING, "Could not update traffic share on any applications", lastException); else if ( successFactorDeviation > 0.1 ) log.log(Level.FINE, "Could not update traffic share on all applications", lastException); return successFactorDeviation; }
log.log(Level.FINE, "Could not update traffic share on all applications", lastException);
protected double maintain() { Exception lastException = null; int attempts = 0; int failures = 0; var metrics = collectClusterMetrics(); for (var application : applications.asList()) { for (var instance : application.instances().values()) { for (var deployment : instance.productionDeployments().values()) { if (shuttingDown()) return 0.0; try { attempts++; var bcpGroups = BcpGroup.groupsFrom(instance, application.deploymentSpec()); var patch = new ApplicationPatch(); addTrafficShare(deployment, bcpGroups, patch); addBcpGroupInfo(deployment.zone().region(), metrics.get(instance.id()), bcpGroups, patch); nodeRepository.patchApplication(deployment.zone(), instance.id(), patch); } catch (Exception e) { failures++; lastException = e; } } } } double successFactorDeviation = asSuccessFactorDeviation(attempts, failures); if ( successFactorDeviation == -successFactorBaseline ) log.log(Level.WARNING, "Could not update traffic share on any applications", lastException); else if ( successFactorDeviation < -0.1 ) log.log(Level.FINE, "Could not update traffic share on all applications", lastException); return successFactorDeviation; }
class BcpGroupUpdater extends ControllerMaintainer { private final ApplicationController applications; private final NodeRepository nodeRepository; public BcpGroupUpdater(Controller controller, Duration duration, Double successFactorBaseline) { super(controller, duration, successFactorBaseline); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override /** Adds deployment traffic share to the given patch. */ private void addTrafficShare(Deployment deployment, List<BcpGroup> bcpGroups, ApplicationPatch patch) { double currentReadShare = 0; double maxReadShare = 0; for (BcpGroup group : bcpGroups) { if ( ! group.contains(deployment.zone().region())) continue; double deploymentQps = deployment.metrics().queriesPerSecond(); double groupQps = group.totalQps(); double fraction = group.fraction(deployment.zone().region()); currentReadShare += groupQps == 0 ? 0 : fraction * deploymentQps / groupQps; maxReadShare += group.size() == 1 ? currentReadShare : fraction * ( deploymentQps + group.maxQpsExcluding(deployment.zone().region()) / (group.size() - 1) ) / groupQps; } patch.currentReadShare = currentReadShare; patch.maxReadShare = maxReadShare; } private Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> collectClusterMetrics() { Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> metrics = new HashMap<>(); for (var deploymentEntry : new HashMap<>(controller().applications().deploymentInfo()).entrySet()) { if ( ! 
deploymentEntry.getKey().zoneId().environment().isProduction()) continue; var appEntry = metrics.computeIfAbsent(deploymentEntry.getKey().applicationId(), __ -> new HashMap<>()); for (var clusterEntry : deploymentEntry.getValue().clusters().entrySet()) { var clusterMetrics = appEntry.computeIfAbsent(clusterEntry.getKey(), __ -> new ClusterDeploymentMetrics()); clusterMetrics.put(deploymentEntry.getKey().zoneId().region(), new DeploymentMetrics(clusterEntry.getValue().target().metrics().queryRate(), clusterEntry.getValue().target().metrics().growthRateHeadroom(), clusterEntry.getValue().target().metrics().cpuCostPerQuery())); } } return metrics; } /** Adds bcp group info to the given patch, for any clusters where we have information. */ private void addBcpGroupInfo(RegionName regionToUpdate, Map<ClusterSpec.Id, ClusterDeploymentMetrics> metrics, List<BcpGroup> bcpGroups, ApplicationPatch patch) { if (metrics == null) return; for (var clusterEntry : metrics.entrySet()) { addClusterBcpGroupInfo(clusterEntry.getKey(), clusterEntry.getValue(), regionToUpdate, bcpGroups, patch); } } private void addClusterBcpGroupInfo(ClusterSpec.Id id, ClusterDeploymentMetrics metrics, RegionName regionToUpdate, List<BcpGroup> bcpGroups, ApplicationPatch patch) { var weightedSumOfMaxMetrics = DeploymentMetrics.empty(); double sumOfCompleteMemberships = 0; for (BcpGroup bcpGroup : bcpGroups) { if ( ! bcpGroup.contains(regionToUpdate)) continue; var groupMetrics = metrics.subsetOf(bcpGroup); if ( ! 
groupMetrics.isCompleteExcluding(regionToUpdate, bcpGroup)) continue; var max = groupMetrics.maxQueryRateExcluding(regionToUpdate, bcpGroup); if (max.isEmpty()) continue; weightedSumOfMaxMetrics = weightedSumOfMaxMetrics.add(max.get().multipliedBy(bcpGroup.fraction(regionToUpdate))); sumOfCompleteMemberships += bcpGroup.fraction(regionToUpdate); } if (sumOfCompleteMemberships > 0) patch.clusters.put(id.value(), weightedSumOfMaxMetrics.dividedBy(sumOfCompleteMemberships).asClusterPatch()); } /** * A set of regions which will take over traffic from each other if one of them fails. * Each region will take an equal share (modulated by fraction) of the failing region's traffic. * * A regions membership in a group may be partial, represented by a fraction [0, 1], * in which case the other regions will collectively only take that fraction of the failing regions traffic, * and symmetrically, the region will only take its fraction of its share of traffic of any other failing region. */ private static class BcpGroup { /** The instance which has this group. */ private final Instance instance; /** Regions in this group, with their fractions. */ private final Map<RegionName, Double> regions; /** Creates a group of a subset of the deployments in this instance. */ private BcpGroup(Instance instance, Map<RegionName, Double> regions) { this.instance = instance; this.regions = regions; } /** Returns the sum of the fractional memberships of this. */ double size() { return regions.values().stream().mapToDouble(f -> f).sum(); } Set<RegionName> regions() { return regions.keySet(); } double fraction(RegionName region) { return regions.getOrDefault(region, 0.0); } boolean contains(RegionName region) { return regions.containsKey(region); } double totalQps() { return instance.productionDeployments().values().stream() .mapToDouble(i -> i.metrics().queriesPerSecond()).sum(); } double maxQpsExcluding(RegionName region) { return instance.productionDeployments().values().stream() .filter(d -> ! 
d.zone().region().equals(region)) .mapToDouble(d -> d.metrics().queriesPerSecond() * fraction(d.zone().region())) .max() .orElse(0); } private static Bcp bcpOf(InstanceName instanceName, DeploymentSpec deploymentSpec) { var instanceSpec = deploymentSpec.instance(instanceName); if (instanceSpec.isEmpty()) return deploymentSpec.bcp(); return instanceSpec.get().bcp().orElse(deploymentSpec.bcp()); } private static Map<RegionName, Double> regionsFrom(Instance instance) { return instance.productionDeployments().values().stream() .collect(Collectors.toMap(deployment -> deployment.zone().region(), __ -> 1.0)); } private static Map<RegionName, Double> regionsFrom(Bcp.Group groupSpec) { return groupSpec.members().stream() .collect(Collectors.toMap(member -> member.region(), member -> member.fraction())); } static List<BcpGroup> groupsFrom(Instance instance, DeploymentSpec deploymentSpec) { Bcp bcp = bcpOf(instance.name(), deploymentSpec); if (bcp.isEmpty()) return List.of(new BcpGroup(instance, regionsFrom(instance))); return bcp.groups().stream().map(groupSpec -> new BcpGroup(instance, regionsFrom(groupSpec))).toList(); } } record ApplicationClusterKey(ApplicationId application, ClusterSpec.Id cluster) { } static class ClusterDeploymentMetrics { private final Map<RegionName, DeploymentMetrics> deploymentMetrics; public ClusterDeploymentMetrics() { this.deploymentMetrics = new ConcurrentHashMap<>(); } public ClusterDeploymentMetrics(Map<RegionName, DeploymentMetrics> deploymentMetrics) { this.deploymentMetrics = new ConcurrentHashMap<>(deploymentMetrics); } void put(RegionName region, DeploymentMetrics metrics) { deploymentMetrics.put(region, metrics); } ClusterDeploymentMetrics subsetOf(BcpGroup group) { Map<RegionName, DeploymentMetrics> filteredMetrics = new HashMap<>(); for (var entry : deploymentMetrics.entrySet()) { if (group.contains(entry.getKey())) filteredMetrics.put(entry.getKey(), entry.getValue()); } return new ClusterDeploymentMetrics(filteredMetrics); } /** 
Returns whether this has deployment metrics for each of the deployments in the given instance. */ boolean isCompleteExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup).allMatch(region -> deploymentMetrics.containsKey(region)); } /** Returns the metrics with the max query rate among the given instance, if any. */ Optional<DeploymentMetrics> maxQueryRateExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup) .map(region -> deploymentMetrics.get(region)) .max(Comparator.comparingDouble(m -> m.queryRate)); } private Stream<RegionName> regionsExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return bcpGroup.regions().stream() .filter(region -> ! region.equals(regionToExclude)); } } /** Metrics for a given application, cluster and deployment. */ record DeploymentMetrics(double queryRate, double growthRateHeadroom, double cpuCostPerQuery) { public ApplicationPatch.ClusterPatch asClusterPatch() { return new ApplicationPatch.ClusterPatch(new ApplicationPatch.BcpGroupInfo(queryRate, growthRateHeadroom, cpuCostPerQuery)); } DeploymentMetrics dividedBy(double d) { return new DeploymentMetrics(queryRate / d, growthRateHeadroom / d, cpuCostPerQuery / d); } DeploymentMetrics multipliedBy(double m) { return new DeploymentMetrics(queryRate * m, growthRateHeadroom * m, cpuCostPerQuery * m); } DeploymentMetrics add(DeploymentMetrics other) { return new DeploymentMetrics(queryRate + other.queryRate, growthRateHeadroom + other.growthRateHeadroom, cpuCostPerQuery + other.cpuCostPerQuery); } public static DeploymentMetrics empty() { return new DeploymentMetrics(0, 0, 0); } } }
class BcpGroupUpdater extends ControllerMaintainer { private final ApplicationController applications; private final NodeRepository nodeRepository; private final Double successFactorBaseline; public BcpGroupUpdater(Controller controller, Duration duration, Double successFactorBaseline) { super(controller, duration, successFactorBaseline); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); this.successFactorBaseline = successFactorBaseline; } @Override /** Adds deployment traffic share to the given patch. */ private void addTrafficShare(Deployment deployment, List<BcpGroup> bcpGroups, ApplicationPatch patch) { double currentReadShare = 0; double maxReadShare = 0; for (BcpGroup group : bcpGroups) { if ( ! group.contains(deployment.zone().region())) continue; double deploymentQps = deployment.metrics().queriesPerSecond(); double groupQps = group.totalQps(); double fraction = group.fraction(deployment.zone().region()); currentReadShare += groupQps == 0 ? 0 : fraction * deploymentQps / groupQps; maxReadShare += group.size() == 1 ? currentReadShare : fraction * ( deploymentQps + group.maxQpsExcluding(deployment.zone().region()) / (group.size() - 1) ) / groupQps; } patch.currentReadShare = currentReadShare; patch.maxReadShare = maxReadShare; } private Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> collectClusterMetrics() { Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> metrics = new HashMap<>(); for (var deploymentEntry : new HashMap<>(controller().applications().deploymentInfo()).entrySet()) { if ( ! 
deploymentEntry.getKey().zoneId().environment().isProduction()) continue; var appEntry = metrics.computeIfAbsent(deploymentEntry.getKey().applicationId(), __ -> new HashMap<>()); for (var clusterEntry : deploymentEntry.getValue().clusters().entrySet()) { var clusterMetrics = appEntry.computeIfAbsent(clusterEntry.getKey(), __ -> new ClusterDeploymentMetrics()); clusterMetrics.put(deploymentEntry.getKey().zoneId().region(), new DeploymentMetrics(clusterEntry.getValue().target().metrics().queryRate(), clusterEntry.getValue().target().metrics().growthRateHeadroom(), clusterEntry.getValue().target().metrics().cpuCostPerQuery())); } } return metrics; } /** Adds bcp group info to the given patch, for any clusters where we have information. */ private void addBcpGroupInfo(RegionName regionToUpdate, Map<ClusterSpec.Id, ClusterDeploymentMetrics> metrics, List<BcpGroup> bcpGroups, ApplicationPatch patch) { if (metrics == null) return; for (var clusterEntry : metrics.entrySet()) { addClusterBcpGroupInfo(clusterEntry.getKey(), clusterEntry.getValue(), regionToUpdate, bcpGroups, patch); } } private void addClusterBcpGroupInfo(ClusterSpec.Id id, ClusterDeploymentMetrics metrics, RegionName regionToUpdate, List<BcpGroup> bcpGroups, ApplicationPatch patch) { var weightedSumOfMaxMetrics = DeploymentMetrics.empty(); double sumOfCompleteMemberships = 0; for (BcpGroup bcpGroup : bcpGroups) { if ( ! bcpGroup.contains(regionToUpdate)) continue; var groupMetrics = metrics.subsetOf(bcpGroup); if ( ! 
groupMetrics.isCompleteExcluding(regionToUpdate, bcpGroup)) continue; var max = groupMetrics.maxQueryRateExcluding(regionToUpdate, bcpGroup); if (max.isEmpty()) continue; weightedSumOfMaxMetrics = weightedSumOfMaxMetrics.add(max.get().multipliedBy(bcpGroup.fraction(regionToUpdate))); sumOfCompleteMemberships += bcpGroup.fraction(regionToUpdate); } if (sumOfCompleteMemberships > 0) patch.clusters.put(id.value(), weightedSumOfMaxMetrics.dividedBy(sumOfCompleteMemberships).asClusterPatch()); } /** * A set of regions which will take over traffic from each other if one of them fails. * Each region will take an equal share (modulated by fraction) of the failing region's traffic. * * A regions membership in a group may be partial, represented by a fraction [0, 1], * in which case the other regions will collectively only take that fraction of the failing regions traffic, * and symmetrically, the region will only take its fraction of its share of traffic of any other failing region. */ private static class BcpGroup { /** The instance which has this group. */ private final Instance instance; /** Regions in this group, with their fractions. */ private final Map<RegionName, Double> regions; /** Creates a group of a subset of the deployments in this instance. */ private BcpGroup(Instance instance, Map<RegionName, Double> regions) { this.instance = instance; this.regions = regions; } /** Returns the sum of the fractional memberships of this. */ double size() { return regions.values().stream().mapToDouble(f -> f).sum(); } Set<RegionName> regions() { return regions.keySet(); } double fraction(RegionName region) { return regions.getOrDefault(region, 0.0); } boolean contains(RegionName region) { return regions.containsKey(region); } double totalQps() { return instance.productionDeployments().values().stream() .mapToDouble(i -> i.metrics().queriesPerSecond()).sum(); } double maxQpsExcluding(RegionName region) { return instance.productionDeployments().values().stream() .filter(d -> ! 
d.zone().region().equals(region)) .mapToDouble(d -> d.metrics().queriesPerSecond() * fraction(d.zone().region())) .max() .orElse(0); } private static Bcp bcpOf(InstanceName instanceName, DeploymentSpec deploymentSpec) { var instanceSpec = deploymentSpec.instance(instanceName); if (instanceSpec.isEmpty()) return deploymentSpec.bcp(); return instanceSpec.get().bcp().orElse(deploymentSpec.bcp()); } private static Map<RegionName, Double> regionsFrom(Instance instance) { return instance.productionDeployments().values().stream() .collect(Collectors.toMap(deployment -> deployment.zone().region(), __ -> 1.0)); } private static Map<RegionName, Double> regionsFrom(Bcp.Group groupSpec) { return groupSpec.members().stream() .collect(Collectors.toMap(member -> member.region(), member -> member.fraction())); } static List<BcpGroup> groupsFrom(Instance instance, DeploymentSpec deploymentSpec) { Bcp bcp = bcpOf(instance.name(), deploymentSpec); if (bcp.isEmpty()) return List.of(new BcpGroup(instance, regionsFrom(instance))); return bcp.groups().stream().map(groupSpec -> new BcpGroup(instance, regionsFrom(groupSpec))).toList(); } } record ApplicationClusterKey(ApplicationId application, ClusterSpec.Id cluster) { } static class ClusterDeploymentMetrics { private final Map<RegionName, DeploymentMetrics> deploymentMetrics; public ClusterDeploymentMetrics() { this.deploymentMetrics = new ConcurrentHashMap<>(); } public ClusterDeploymentMetrics(Map<RegionName, DeploymentMetrics> deploymentMetrics) { this.deploymentMetrics = new ConcurrentHashMap<>(deploymentMetrics); } void put(RegionName region, DeploymentMetrics metrics) { deploymentMetrics.put(region, metrics); } ClusterDeploymentMetrics subsetOf(BcpGroup group) { Map<RegionName, DeploymentMetrics> filteredMetrics = new HashMap<>(); for (var entry : deploymentMetrics.entrySet()) { if (group.contains(entry.getKey())) filteredMetrics.put(entry.getKey(), entry.getValue()); } return new ClusterDeploymentMetrics(filteredMetrics); } /** 
Returns whether this has deployment metrics for each of the deployments in the given instance. */ boolean isCompleteExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup).allMatch(region -> deploymentMetrics.containsKey(region)); } /** Returns the metrics with the max query rate among the given instance, if any. */ Optional<DeploymentMetrics> maxQueryRateExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup) .map(region -> deploymentMetrics.get(region)) .max(Comparator.comparingDouble(m -> m.queryRate)); } private Stream<RegionName> regionsExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return bcpGroup.regions().stream() .filter(region -> ! region.equals(regionToExclude)); } } /** Metrics for a given application, cluster and deployment. */ record DeploymentMetrics(double queryRate, double growthRateHeadroom, double cpuCostPerQuery) { public ApplicationPatch.ClusterPatch asClusterPatch() { return new ApplicationPatch.ClusterPatch(new ApplicationPatch.BcpGroupInfo(queryRate, growthRateHeadroom, cpuCostPerQuery)); } DeploymentMetrics dividedBy(double d) { return new DeploymentMetrics(queryRate / d, growthRateHeadroom / d, cpuCostPerQuery / d); } DeploymentMetrics multipliedBy(double m) { return new DeploymentMetrics(queryRate * m, growthRateHeadroom * m, cpuCostPerQuery * m); } DeploymentMetrics add(DeploymentMetrics other) { return new DeploymentMetrics(queryRate + other.queryRate, growthRateHeadroom + other.growthRateHeadroom, cpuCostPerQuery + other.cpuCostPerQuery); } public static DeploymentMetrics empty() { return new DeploymentMetrics(0, 0, 0); } } }
:( Waiting for a brain reboot
protected double maintain() { Exception lastException = null; int attempts = 0; int failures = 0; var metrics = collectClusterMetrics(); for (var application : applications.asList()) { for (var instance : application.instances().values()) { for (var deployment : instance.productionDeployments().values()) { if (shuttingDown()) return 0.0; try { attempts++; var bcpGroups = BcpGroup.groupsFrom(instance, application.deploymentSpec()); var patch = new ApplicationPatch(); addTrafficShare(deployment, bcpGroups, patch); addBcpGroupInfo(deployment.zone().region(), metrics.get(instance.id()), bcpGroups, patch); nodeRepository.patchApplication(deployment.zone(), instance.id(), patch); } catch (Exception e) { failures++; lastException = e; } } } } double successFactorDeviation = asSuccessFactorDeviation(attempts, failures); if ( successFactorDeviation == 1.0 ) log.log(Level.WARNING, "Could not update traffic share on any applications", lastException); else if ( successFactorDeviation > 0.1 ) log.log(Level.FINE, "Could not update traffic share on all applications", lastException); return successFactorDeviation; }
log.log(Level.FINE, "Could not update traffic share on all applications", lastException);
protected double maintain() { Exception lastException = null; int attempts = 0; int failures = 0; var metrics = collectClusterMetrics(); for (var application : applications.asList()) { for (var instance : application.instances().values()) { for (var deployment : instance.productionDeployments().values()) { if (shuttingDown()) return 0.0; try { attempts++; var bcpGroups = BcpGroup.groupsFrom(instance, application.deploymentSpec()); var patch = new ApplicationPatch(); addTrafficShare(deployment, bcpGroups, patch); addBcpGroupInfo(deployment.zone().region(), metrics.get(instance.id()), bcpGroups, patch); nodeRepository.patchApplication(deployment.zone(), instance.id(), patch); } catch (Exception e) { failures++; lastException = e; } } } } double successFactorDeviation = asSuccessFactorDeviation(attempts, failures); if ( successFactorDeviation == -successFactorBaseline ) log.log(Level.WARNING, "Could not update traffic share on any applications", lastException); else if ( successFactorDeviation < -0.1 ) log.log(Level.FINE, "Could not update traffic share on all applications", lastException); return successFactorDeviation; }
class BcpGroupUpdater extends ControllerMaintainer { private final ApplicationController applications; private final NodeRepository nodeRepository; public BcpGroupUpdater(Controller controller, Duration duration, Double successFactorBaseline) { super(controller, duration, successFactorBaseline); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); } @Override /** Adds deployment traffic share to the given patch. */ private void addTrafficShare(Deployment deployment, List<BcpGroup> bcpGroups, ApplicationPatch patch) { double currentReadShare = 0; double maxReadShare = 0; for (BcpGroup group : bcpGroups) { if ( ! group.contains(deployment.zone().region())) continue; double deploymentQps = deployment.metrics().queriesPerSecond(); double groupQps = group.totalQps(); double fraction = group.fraction(deployment.zone().region()); currentReadShare += groupQps == 0 ? 0 : fraction * deploymentQps / groupQps; maxReadShare += group.size() == 1 ? currentReadShare : fraction * ( deploymentQps + group.maxQpsExcluding(deployment.zone().region()) / (group.size() - 1) ) / groupQps; } patch.currentReadShare = currentReadShare; patch.maxReadShare = maxReadShare; } private Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> collectClusterMetrics() { Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> metrics = new HashMap<>(); for (var deploymentEntry : new HashMap<>(controller().applications().deploymentInfo()).entrySet()) { if ( ! 
deploymentEntry.getKey().zoneId().environment().isProduction()) continue; var appEntry = metrics.computeIfAbsent(deploymentEntry.getKey().applicationId(), __ -> new HashMap<>()); for (var clusterEntry : deploymentEntry.getValue().clusters().entrySet()) { var clusterMetrics = appEntry.computeIfAbsent(clusterEntry.getKey(), __ -> new ClusterDeploymentMetrics()); clusterMetrics.put(deploymentEntry.getKey().zoneId().region(), new DeploymentMetrics(clusterEntry.getValue().target().metrics().queryRate(), clusterEntry.getValue().target().metrics().growthRateHeadroom(), clusterEntry.getValue().target().metrics().cpuCostPerQuery())); } } return metrics; } /** Adds bcp group info to the given patch, for any clusters where we have information. */ private void addBcpGroupInfo(RegionName regionToUpdate, Map<ClusterSpec.Id, ClusterDeploymentMetrics> metrics, List<BcpGroup> bcpGroups, ApplicationPatch patch) { if (metrics == null) return; for (var clusterEntry : metrics.entrySet()) { addClusterBcpGroupInfo(clusterEntry.getKey(), clusterEntry.getValue(), regionToUpdate, bcpGroups, patch); } } private void addClusterBcpGroupInfo(ClusterSpec.Id id, ClusterDeploymentMetrics metrics, RegionName regionToUpdate, List<BcpGroup> bcpGroups, ApplicationPatch patch) { var weightedSumOfMaxMetrics = DeploymentMetrics.empty(); double sumOfCompleteMemberships = 0; for (BcpGroup bcpGroup : bcpGroups) { if ( ! bcpGroup.contains(regionToUpdate)) continue; var groupMetrics = metrics.subsetOf(bcpGroup); if ( ! 
groupMetrics.isCompleteExcluding(regionToUpdate, bcpGroup)) continue; var max = groupMetrics.maxQueryRateExcluding(regionToUpdate, bcpGroup); if (max.isEmpty()) continue; weightedSumOfMaxMetrics = weightedSumOfMaxMetrics.add(max.get().multipliedBy(bcpGroup.fraction(regionToUpdate))); sumOfCompleteMemberships += bcpGroup.fraction(regionToUpdate); } if (sumOfCompleteMemberships > 0) patch.clusters.put(id.value(), weightedSumOfMaxMetrics.dividedBy(sumOfCompleteMemberships).asClusterPatch()); } /** * A set of regions which will take over traffic from each other if one of them fails. * Each region will take an equal share (modulated by fraction) of the failing region's traffic. * * A regions membership in a group may be partial, represented by a fraction [0, 1], * in which case the other regions will collectively only take that fraction of the failing regions traffic, * and symmetrically, the region will only take its fraction of its share of traffic of any other failing region. */ private static class BcpGroup { /** The instance which has this group. */ private final Instance instance; /** Regions in this group, with their fractions. */ private final Map<RegionName, Double> regions; /** Creates a group of a subset of the deployments in this instance. */ private BcpGroup(Instance instance, Map<RegionName, Double> regions) { this.instance = instance; this.regions = regions; } /** Returns the sum of the fractional memberships of this. */ double size() { return regions.values().stream().mapToDouble(f -> f).sum(); } Set<RegionName> regions() { return regions.keySet(); } double fraction(RegionName region) { return regions.getOrDefault(region, 0.0); } boolean contains(RegionName region) { return regions.containsKey(region); } double totalQps() { return instance.productionDeployments().values().stream() .mapToDouble(i -> i.metrics().queriesPerSecond()).sum(); } double maxQpsExcluding(RegionName region) { return instance.productionDeployments().values().stream() .filter(d -> ! 
d.zone().region().equals(region)) .mapToDouble(d -> d.metrics().queriesPerSecond() * fraction(d.zone().region())) .max() .orElse(0); } private static Bcp bcpOf(InstanceName instanceName, DeploymentSpec deploymentSpec) { var instanceSpec = deploymentSpec.instance(instanceName); if (instanceSpec.isEmpty()) return deploymentSpec.bcp(); return instanceSpec.get().bcp().orElse(deploymentSpec.bcp()); } private static Map<RegionName, Double> regionsFrom(Instance instance) { return instance.productionDeployments().values().stream() .collect(Collectors.toMap(deployment -> deployment.zone().region(), __ -> 1.0)); } private static Map<RegionName, Double> regionsFrom(Bcp.Group groupSpec) { return groupSpec.members().stream() .collect(Collectors.toMap(member -> member.region(), member -> member.fraction())); } static List<BcpGroup> groupsFrom(Instance instance, DeploymentSpec deploymentSpec) { Bcp bcp = bcpOf(instance.name(), deploymentSpec); if (bcp.isEmpty()) return List.of(new BcpGroup(instance, regionsFrom(instance))); return bcp.groups().stream().map(groupSpec -> new BcpGroup(instance, regionsFrom(groupSpec))).toList(); } } record ApplicationClusterKey(ApplicationId application, ClusterSpec.Id cluster) { } static class ClusterDeploymentMetrics { private final Map<RegionName, DeploymentMetrics> deploymentMetrics; public ClusterDeploymentMetrics() { this.deploymentMetrics = new ConcurrentHashMap<>(); } public ClusterDeploymentMetrics(Map<RegionName, DeploymentMetrics> deploymentMetrics) { this.deploymentMetrics = new ConcurrentHashMap<>(deploymentMetrics); } void put(RegionName region, DeploymentMetrics metrics) { deploymentMetrics.put(region, metrics); } ClusterDeploymentMetrics subsetOf(BcpGroup group) { Map<RegionName, DeploymentMetrics> filteredMetrics = new HashMap<>(); for (var entry : deploymentMetrics.entrySet()) { if (group.contains(entry.getKey())) filteredMetrics.put(entry.getKey(), entry.getValue()); } return new ClusterDeploymentMetrics(filteredMetrics); } /** 
Returns whether this has deployment metrics for each of the deployments in the given instance. */ boolean isCompleteExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup).allMatch(region -> deploymentMetrics.containsKey(region)); } /** Returns the metrics with the max query rate among the given instance, if any. */ Optional<DeploymentMetrics> maxQueryRateExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup) .map(region -> deploymentMetrics.get(region)) .max(Comparator.comparingDouble(m -> m.queryRate)); } private Stream<RegionName> regionsExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return bcpGroup.regions().stream() .filter(region -> ! region.equals(regionToExclude)); } } /** Metrics for a given application, cluster and deployment. */ record DeploymentMetrics(double queryRate, double growthRateHeadroom, double cpuCostPerQuery) { public ApplicationPatch.ClusterPatch asClusterPatch() { return new ApplicationPatch.ClusterPatch(new ApplicationPatch.BcpGroupInfo(queryRate, growthRateHeadroom, cpuCostPerQuery)); } DeploymentMetrics dividedBy(double d) { return new DeploymentMetrics(queryRate / d, growthRateHeadroom / d, cpuCostPerQuery / d); } DeploymentMetrics multipliedBy(double m) { return new DeploymentMetrics(queryRate * m, growthRateHeadroom * m, cpuCostPerQuery * m); } DeploymentMetrics add(DeploymentMetrics other) { return new DeploymentMetrics(queryRate + other.queryRate, growthRateHeadroom + other.growthRateHeadroom, cpuCostPerQuery + other.cpuCostPerQuery); } public static DeploymentMetrics empty() { return new DeploymentMetrics(0, 0, 0); } } }
class BcpGroupUpdater extends ControllerMaintainer { private final ApplicationController applications; private final NodeRepository nodeRepository; private final Double successFactorBaseline; public BcpGroupUpdater(Controller controller, Duration duration, Double successFactorBaseline) { super(controller, duration, successFactorBaseline); this.applications = controller.applications(); this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); this.successFactorBaseline = successFactorBaseline; } @Override /** Adds deployment traffic share to the given patch. */ private void addTrafficShare(Deployment deployment, List<BcpGroup> bcpGroups, ApplicationPatch patch) { double currentReadShare = 0; double maxReadShare = 0; for (BcpGroup group : bcpGroups) { if ( ! group.contains(deployment.zone().region())) continue; double deploymentQps = deployment.metrics().queriesPerSecond(); double groupQps = group.totalQps(); double fraction = group.fraction(deployment.zone().region()); currentReadShare += groupQps == 0 ? 0 : fraction * deploymentQps / groupQps; maxReadShare += group.size() == 1 ? currentReadShare : fraction * ( deploymentQps + group.maxQpsExcluding(deployment.zone().region()) / (group.size() - 1) ) / groupQps; } patch.currentReadShare = currentReadShare; patch.maxReadShare = maxReadShare; } private Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> collectClusterMetrics() { Map<ApplicationId, Map<ClusterSpec.Id, ClusterDeploymentMetrics>> metrics = new HashMap<>(); for (var deploymentEntry : new HashMap<>(controller().applications().deploymentInfo()).entrySet()) { if ( ! 
deploymentEntry.getKey().zoneId().environment().isProduction()) continue; var appEntry = metrics.computeIfAbsent(deploymentEntry.getKey().applicationId(), __ -> new HashMap<>()); for (var clusterEntry : deploymentEntry.getValue().clusters().entrySet()) { var clusterMetrics = appEntry.computeIfAbsent(clusterEntry.getKey(), __ -> new ClusterDeploymentMetrics()); clusterMetrics.put(deploymentEntry.getKey().zoneId().region(), new DeploymentMetrics(clusterEntry.getValue().target().metrics().queryRate(), clusterEntry.getValue().target().metrics().growthRateHeadroom(), clusterEntry.getValue().target().metrics().cpuCostPerQuery())); } } return metrics; } /** Adds bcp group info to the given patch, for any clusters where we have information. */ private void addBcpGroupInfo(RegionName regionToUpdate, Map<ClusterSpec.Id, ClusterDeploymentMetrics> metrics, List<BcpGroup> bcpGroups, ApplicationPatch patch) { if (metrics == null) return; for (var clusterEntry : metrics.entrySet()) { addClusterBcpGroupInfo(clusterEntry.getKey(), clusterEntry.getValue(), regionToUpdate, bcpGroups, patch); } } private void addClusterBcpGroupInfo(ClusterSpec.Id id, ClusterDeploymentMetrics metrics, RegionName regionToUpdate, List<BcpGroup> bcpGroups, ApplicationPatch patch) { var weightedSumOfMaxMetrics = DeploymentMetrics.empty(); double sumOfCompleteMemberships = 0; for (BcpGroup bcpGroup : bcpGroups) { if ( ! bcpGroup.contains(regionToUpdate)) continue; var groupMetrics = metrics.subsetOf(bcpGroup); if ( ! 
groupMetrics.isCompleteExcluding(regionToUpdate, bcpGroup)) continue; var max = groupMetrics.maxQueryRateExcluding(regionToUpdate, bcpGroup); if (max.isEmpty()) continue; weightedSumOfMaxMetrics = weightedSumOfMaxMetrics.add(max.get().multipliedBy(bcpGroup.fraction(regionToUpdate))); sumOfCompleteMemberships += bcpGroup.fraction(regionToUpdate); } if (sumOfCompleteMemberships > 0) patch.clusters.put(id.value(), weightedSumOfMaxMetrics.dividedBy(sumOfCompleteMemberships).asClusterPatch()); } /** * A set of regions which will take over traffic from each other if one of them fails. * Each region will take an equal share (modulated by fraction) of the failing region's traffic. * * A regions membership in a group may be partial, represented by a fraction [0, 1], * in which case the other regions will collectively only take that fraction of the failing regions traffic, * and symmetrically, the region will only take its fraction of its share of traffic of any other failing region. */ private static class BcpGroup { /** The instance which has this group. */ private final Instance instance; /** Regions in this group, with their fractions. */ private final Map<RegionName, Double> regions; /** Creates a group of a subset of the deployments in this instance. */ private BcpGroup(Instance instance, Map<RegionName, Double> regions) { this.instance = instance; this.regions = regions; } /** Returns the sum of the fractional memberships of this. */ double size() { return regions.values().stream().mapToDouble(f -> f).sum(); } Set<RegionName> regions() { return regions.keySet(); } double fraction(RegionName region) { return regions.getOrDefault(region, 0.0); } boolean contains(RegionName region) { return regions.containsKey(region); } double totalQps() { return instance.productionDeployments().values().stream() .mapToDouble(i -> i.metrics().queriesPerSecond()).sum(); } double maxQpsExcluding(RegionName region) { return instance.productionDeployments().values().stream() .filter(d -> ! 
d.zone().region().equals(region)) .mapToDouble(d -> d.metrics().queriesPerSecond() * fraction(d.zone().region())) .max() .orElse(0); } private static Bcp bcpOf(InstanceName instanceName, DeploymentSpec deploymentSpec) { var instanceSpec = deploymentSpec.instance(instanceName); if (instanceSpec.isEmpty()) return deploymentSpec.bcp(); return instanceSpec.get().bcp().orElse(deploymentSpec.bcp()); } private static Map<RegionName, Double> regionsFrom(Instance instance) { return instance.productionDeployments().values().stream() .collect(Collectors.toMap(deployment -> deployment.zone().region(), __ -> 1.0)); } private static Map<RegionName, Double> regionsFrom(Bcp.Group groupSpec) { return groupSpec.members().stream() .collect(Collectors.toMap(member -> member.region(), member -> member.fraction())); } static List<BcpGroup> groupsFrom(Instance instance, DeploymentSpec deploymentSpec) { Bcp bcp = bcpOf(instance.name(), deploymentSpec); if (bcp.isEmpty()) return List.of(new BcpGroup(instance, regionsFrom(instance))); return bcp.groups().stream().map(groupSpec -> new BcpGroup(instance, regionsFrom(groupSpec))).toList(); } } record ApplicationClusterKey(ApplicationId application, ClusterSpec.Id cluster) { } static class ClusterDeploymentMetrics { private final Map<RegionName, DeploymentMetrics> deploymentMetrics; public ClusterDeploymentMetrics() { this.deploymentMetrics = new ConcurrentHashMap<>(); } public ClusterDeploymentMetrics(Map<RegionName, DeploymentMetrics> deploymentMetrics) { this.deploymentMetrics = new ConcurrentHashMap<>(deploymentMetrics); } void put(RegionName region, DeploymentMetrics metrics) { deploymentMetrics.put(region, metrics); } ClusterDeploymentMetrics subsetOf(BcpGroup group) { Map<RegionName, DeploymentMetrics> filteredMetrics = new HashMap<>(); for (var entry : deploymentMetrics.entrySet()) { if (group.contains(entry.getKey())) filteredMetrics.put(entry.getKey(), entry.getValue()); } return new ClusterDeploymentMetrics(filteredMetrics); } /** 
Returns whether this has deployment metrics for each of the deployments in the given instance. */ boolean isCompleteExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup).allMatch(region -> deploymentMetrics.containsKey(region)); } /** Returns the metrics with the max query rate among the given instance, if any. */ Optional<DeploymentMetrics> maxQueryRateExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return regionsExcluding(regionToExclude, bcpGroup) .map(region -> deploymentMetrics.get(region)) .max(Comparator.comparingDouble(m -> m.queryRate)); } private Stream<RegionName> regionsExcluding(RegionName regionToExclude, BcpGroup bcpGroup) { return bcpGroup.regions().stream() .filter(region -> ! region.equals(regionToExclude)); } } /** Metrics for a given application, cluster and deployment. */ record DeploymentMetrics(double queryRate, double growthRateHeadroom, double cpuCostPerQuery) { public ApplicationPatch.ClusterPatch asClusterPatch() { return new ApplicationPatch.ClusterPatch(new ApplicationPatch.BcpGroupInfo(queryRate, growthRateHeadroom, cpuCostPerQuery)); } DeploymentMetrics dividedBy(double d) { return new DeploymentMetrics(queryRate / d, growthRateHeadroom / d, cpuCostPerQuery / d); } DeploymentMetrics multipliedBy(double m) { return new DeploymentMetrics(queryRate * m, growthRateHeadroom * m, cpuCostPerQuery * m); } DeploymentMetrics add(DeploymentMetrics other) { return new DeploymentMetrics(queryRate + other.queryRate, growthRateHeadroom + other.growthRateHeadroom, cpuCostPerQuery + other.cpuCostPerQuery); } public static DeploymentMetrics empty() { return new DeploymentMetrics(0, 0, 0); } } }
```suggestion CompoundName cached = cache.get(subName); ``` No point in not using cached entries.
private CompoundName(String name, String [] compounds, boolean useCache) { this.name = Objects.requireNonNull(name, "Name can not be null"); this.lowerCasedName = toLowerCase(name); if (compounds.length == 1) { if (compounds[0].isEmpty()) { this.compounds = List.of(); this.hashCode = 0; rest = first = this; return; } this.compounds = new ImmutableArrayList(compounds); this.hashCode = this.compounds.hashCode(); rest = first = empty; return; } CompoundName[] children = new CompoundName[compounds.length]; for (int i = 0; i + 1 < children.length; i++) { int start = 0, end = i == 0 ? -1 : children[0].name.length(); for (int j = 0; j + i < children.length; j++) { end += compounds[j + i].length() + 1; if (end == start) throw new IllegalArgumentException("'" + name + "' is not a legal compound name. " + "Consecutive, leading or trailing dots are not allowed."); String subName = this.name.substring(start, end); CompoundName cached = useCache ? cache.get(subName) : null; children[j] = cached != null ? cached : new CompoundName(subName, this.lowerCasedName.substring(start, end), Arrays.copyOfRange(compounds, j, j + i + 1), i == 0 ? empty : children[j + 1], i == 0 ? empty : children[j]); if (useCache && cached == null) cache.put(subName, children[j]); start += compounds[j].length() + 1; } } this.compounds = new ImmutableArrayList(compounds); this.hashCode = this.compounds.hashCode(); this.rest = children[1]; this.first = children[0]; }
CompoundName cached = useCache ? cache.get(subName) : null;
private CompoundName(String name, String [] compounds, boolean useCache) { this.name = Objects.requireNonNull(name, "Name can not be null"); this.lowerCasedName = toLowerCase(name); if (compounds.length == 1) { if (compounds[0].isEmpty()) { this.compounds = List.of(); this.hashCode = 0; rest = first = this; return; } this.compounds = new ImmutableArrayList(compounds); this.hashCode = this.compounds.hashCode(); rest = first = empty; return; } CompoundName[] children = new CompoundName[compounds.length]; for (int i = 0; i + 1 < children.length; i++) { int start = 0, end = i == 0 ? -1 : children[0].name.length(); for (int j = 0; j + i < children.length; j++) { end += compounds[j + i].length() + 1; if (end == start) throw new IllegalArgumentException("'" + name + "' is not a legal compound name. " + "Consecutive, leading or trailing dots are not allowed."); String subName = this.name.substring(start, end); CompoundName cached = cache.get(subName); children[j] = cached != null ? cached : new CompoundName(subName, this.lowerCasedName.substring(start, end), Arrays.copyOfRange(compounds, j, j + i + 1), i == 0 ? empty : children[j + 1], i == 0 ? empty : children[j]); if (useCache && cached == null) cache.put(subName, children[j]); start += compounds[j].length() + 1; } } this.compounds = new ImmutableArrayList(compounds); this.hashCode = this.compounds.hashCode(); this.rest = children[1]; this.first = children[0]; }
class CompoundName { private static final int MAX_CACHE_SIZE = 10_000; private static final Map<String, CompoundName> cache = new CopyOnWriteHashMap<>(); /** The empty compound */ public static final CompoundName empty = CompoundName.from(""); /* The string name of this compound. */ private final String name; private final String lowerCasedName; private final List<String> compounds; /** A hashcode which is always derived from the compounds (NEVER the string) */ private final int hashCode; /** This name with the first component removed */ private final CompoundName rest; /** This name with the last component removed */ private final CompoundName first; /** * Constructs this from a string which may contains dot-separated components * * @throws NullPointerException if name is null */ public CompoundName(String name) { this(name, false); } private CompoundName(String name, boolean useCache) { this(name, parse(name).toArray(new String[0]), useCache); } /** Constructs this from an array of name components which are assumed not to contain dots */ public static CompoundName fromComponents(String ... components) { return new CompoundName(List.of(components)); } /** Constructs this from a list of compounds. */ public CompoundName(List<String> compounds) { this(compounds.toArray(new String[0])); } private CompoundName(String [] compounds) { this(toCompoundString(compounds), compounds, false); } /** * Constructs this from a name with already parsed compounds. * Private to avoid creating names with inconsistencies. 
* * @param name the string representation of the compounds * @param compounds the compounds of this name */ private CompoundName(String name, String lowerCasedName, String[] compounds, CompoundName rest, CompoundName first) { this.name = name; this.lowerCasedName = lowerCasedName; this.compounds = new ImmutableArrayList(compounds); this.hashCode = this.compounds.hashCode(); this.rest = rest; this.first = first; } private static List<String> parse(String s) { ArrayList<String> l = null; int p = 0; final int m = s.length(); for (int i = 0; i < m; i++) { if (s.charAt(i) == '.') { if (l == null) l = new ArrayList<>(8); l.add(s.substring(p, i)); p = i + 1; } } if (p == 0) { if (l == null) return List.of(s); l.add(s); } else if (p < m) { l.add(s.substring(p, m)); } else { throw new IllegalArgumentException("'" + s + "' is not a legal compound name. Names can not end with a dot."); } return l; } /** * Returns a compound name which has the given compound string appended to it * * @param name if name is empty this returns <code>this</code> */ public CompoundName append(String name) { if (name.isEmpty()) return this; return append(new CompoundName(name)); } /** * Returns a compound name which has the given compounds appended to it * * @param name if name is empty this returns <code>this</code> */ public CompoundName append(CompoundName name) { if (name.isEmpty()) return this; if (isEmpty()) return name; String [] newCompounds = new String[compounds.size() + name.compounds.size()]; int count = 0; for (String s : compounds) { newCompounds[count++] = s; } for (String s : name.compounds) { newCompounds[count++] = s; } return new CompoundName(concat(this.name, name.name), newCompounds, false); } private static String concat(String name1, String name2) { return name1 + "." + name2; } /** * Returns a compound name which has the given name components prepended to this name, * in the given order, i.e new ComponentName("c").prepend("a","b") will yield "a.b.c". 
* * @param nameParts if name is empty this returns <code>this</code> */ public CompoundName prepend(String ... nameParts) { if (nameParts.length == 0) return this; if (isEmpty()) return fromComponents(nameParts); List<String> newCompounds = new ArrayList<>(nameParts.length + compounds.size()); newCompounds.addAll(Arrays.asList(nameParts)); newCompounds.addAll(this.compounds); return new CompoundName(newCompounds); } /** * Returns the name after the last dot. If there are no dots, the full name is returned. */ public String last() { if (compounds.isEmpty()) return ""; return compounds.get(compounds.size() - 1); } /** * Returns the name before the first dot. If there are no dots the full name is returned. */ public String first() { if (compounds.isEmpty()) return ""; return compounds.get(0); } /** * Returns the first n components of this. * * @throws IllegalArgumentException if this does not have at least n components */ public CompoundName first(int n) { if (compounds.size() < n) throw new IllegalArgumentException("Asked for the first " + n + " components but '" + this + "' only have " + compounds.size() + " components."); if (compounds.size() == n) return this; if (compounds.size() == 0) return empty; if (compounds.size() - 1 == n) return first; return first.first(n); } /** * Returns the name after the first dot, or "" if this name has no dots */ public CompoundName rest() { return rest; } /** * Returns the name starting after the n first components (i.e dots). * This may be the empty name. * * @throws IllegalArgumentException if this does not have at least that many components */ public CompoundName rest(int n) { if (n == 0) return this; if (compounds.size() < n) throw new IllegalArgumentException("Asked for the rest after " + n + " components but '" + this + "' only have " + compounds.size() + " components."); if (n == 1) return rest(); if (compounds.size() == n) return empty; return rest.rest(n - 1); } /** * Returns the number of compound elements in this. 
Which is exactly the number of dots in the string plus one. * The size of an empty compound is 0. */ public int size() { return compounds.size(); } /** * Returns the compound element as the given index */ public String get(int i) { return compounds.get(i); } /** * Returns a compound which have the name component at index i set to the given name. * As an optimization, if the given name == the name component at this index, this is returned. */ public CompoundName set(int i, String name) { if (get(i).equals(name)) return this; List<String> newCompounds = new ArrayList<>(compounds); newCompounds.set(i, name); return new CompoundName(newCompounds); } /** * Returns whether this name has more than one element */ public boolean isCompound() { return compounds.size() > 1; } public boolean isEmpty() { return compounds.isEmpty(); } /** * Returns whether the given name is a prefix of this. * Prefixes are taken on the component, not character level, so * "a" is a prefix of "a.b", but not a prefix of "ax.b */ public boolean hasPrefix(CompoundName prefix) { if (prefix.size() > this.size()) return false; int prefixLength = prefix.name.length(); if (prefixLength == 0) return true; if (name.length() > prefixLength && name.charAt(prefixLength) != '.') return false; return name.startsWith(prefix.name); } /** * Returns an immutable list of the components of this */ public List<String> asList() { return compounds; } @Override public int hashCode() { return hashCode; } @Override public boolean equals(Object arg) { if (arg == this) return true; return (arg instanceof CompoundName o) && name.equals(o.name); } /** * Returns the string representation of this - all the name components in order separated by dots. 
*/ @Override public String toString() { return name; } public String getLowerCasedName() { return lowerCasedName; } private static String toCompoundString(String [] compounds) { int all = compounds.length; for (String compound : compounds) all += compound.length(); StringBuilder b = new StringBuilder(all); for (String compound : compounds) b.append(compound).append("."); return b.length()==0 ? "" : b.substring(0, b.length()-1); } /** * Creates a CompoundName from a string, possibly reusing from cache. * Prefer over constructing on the fly. **/ public static CompoundName from(String name) { CompoundName found = cache.get(name); if (found != null) return found; if (cache.size() < MAX_CACHE_SIZE) { CompoundName compound = new CompoundName(name, true); cache.put(name, compound); return compound; } return new CompoundName(name, false); } private static class ImmutableArrayList extends AbstractList<String> { private final String [] array; ImmutableArrayList(String [] array) { this.array = array; } @Override public String get(int index) { return array[index]; } @Override public int size() { return array.length; } @Override public int hashCode() { int hashCode = 0; for (String s : array) { hashCode = hashCode ^ s.hashCode(); } return hashCode; } } }
class CompoundName { private static final int MAX_CACHE_SIZE = 10_000; private static final Map<String, CompoundName> cache = new CopyOnWriteHashMap<>(); /** The empty compound */ public static final CompoundName empty = CompoundName.from(""); /* The string name of this compound. */ private final String name; private final String lowerCasedName; private final List<String> compounds; /** A hashcode which is always derived from the compounds (NEVER the string) */ private final int hashCode; /** This name with the first component removed */ private final CompoundName rest; /** This name with the last component removed */ private final CompoundName first; /** * Constructs this from a string which may contains dot-separated components * * @throws NullPointerException if name is null */ public CompoundName(String name) { this(name, false); } private CompoundName(String name, boolean useCache) { this(name, parse(name).toArray(new String[0]), useCache); } /** Constructs this from an array of name components which are assumed not to contain dots */ public static CompoundName fromComponents(String ... components) { return new CompoundName(List.of(components)); } /** Constructs this from a list of compounds. */ public CompoundName(List<String> compounds) { this(compounds.toArray(new String[0])); } private CompoundName(String [] compounds) { this(toCompoundString(compounds), compounds, false); } /** * Constructs this from a name with already parsed compounds. * Private to avoid creating names with inconsistencies. 
* * @param name the string representation of the compounds * @param compounds the compounds of this name */ private CompoundName(String name, String lowerCasedName, String[] compounds, CompoundName rest, CompoundName first) { this.name = name; this.lowerCasedName = lowerCasedName; this.compounds = new ImmutableArrayList(compounds); this.hashCode = this.compounds.hashCode(); this.rest = rest; this.first = first; } private static List<String> parse(String s) { ArrayList<String> l = null; int p = 0; final int m = s.length(); for (int i = 0; i < m; i++) { if (s.charAt(i) == '.') { if (l == null) l = new ArrayList<>(8); l.add(s.substring(p, i)); p = i + 1; } } if (p == 0) { if (l == null) return List.of(s); l.add(s); } else if (p < m) { l.add(s.substring(p, m)); } else { throw new IllegalArgumentException("'" + s + "' is not a legal compound name. Names can not end with a dot."); } return l; } /** * Returns a compound name which has the given compound string appended to it * * @param name if name is empty this returns <code>this</code> */ public CompoundName append(String name) { if (name.isEmpty()) return this; return append(new CompoundName(name)); } /** * Returns a compound name which has the given compounds appended to it * * @param name if name is empty this returns <code>this</code> */ public CompoundName append(CompoundName name) { if (name.isEmpty()) return this; if (isEmpty()) return name; String [] newCompounds = new String[compounds.size() + name.compounds.size()]; int count = 0; for (String s : compounds) { newCompounds[count++] = s; } for (String s : name.compounds) { newCompounds[count++] = s; } return new CompoundName(concat(this.name, name.name), newCompounds, false); } private static String concat(String name1, String name2) { return name1 + "." + name2; } /** * Returns a compound name which has the given name components prepended to this name, * in the given order, i.e new ComponentName("c").prepend("a","b") will yield "a.b.c". 
* * @param nameParts if name is empty this returns <code>this</code> */ public CompoundName prepend(String ... nameParts) { if (nameParts.length == 0) return this; if (isEmpty()) return fromComponents(nameParts); List<String> newCompounds = new ArrayList<>(nameParts.length + compounds.size()); newCompounds.addAll(Arrays.asList(nameParts)); newCompounds.addAll(this.compounds); return new CompoundName(newCompounds); } /** * Returns the name after the last dot. If there are no dots, the full name is returned. */ public String last() { if (compounds.isEmpty()) return ""; return compounds.get(compounds.size() - 1); } /** * Returns the name before the first dot. If there are no dots the full name is returned. */ public String first() { if (compounds.isEmpty()) return ""; return compounds.get(0); } /** * Returns the first n components of this. * * @throws IllegalArgumentException if this does not have at least n components */ public CompoundName first(int n) { if (compounds.size() < n) throw new IllegalArgumentException("Asked for the first " + n + " components but '" + this + "' only have " + compounds.size() + " components."); if (compounds.size() == n) return this; if (compounds.size() == 0) return empty; if (compounds.size() - 1 == n) return first; return first.first(n); } /** * Returns the name after the first dot, or "" if this name has no dots */ public CompoundName rest() { return rest; } /** * Returns the name starting after the n first components (i.e dots). * This may be the empty name. * * @throws IllegalArgumentException if this does not have at least that many components */ public CompoundName rest(int n) { if (n == 0) return this; if (compounds.size() < n) throw new IllegalArgumentException("Asked for the rest after " + n + " components but '" + this + "' only have " + compounds.size() + " components."); if (n == 1) return rest(); if (compounds.size() == n) return empty; return rest.rest(n - 1); } /** * Returns the number of compound elements in this. 
Which is exactly the number of dots in the string plus one. * The size of an empty compound is 0. */ public int size() { return compounds.size(); } /** * Returns the compound element as the given index */ public String get(int i) { return compounds.get(i); } /** * Returns a compound which have the name component at index i set to the given name. * As an optimization, if the given name == the name component at this index, this is returned. */ public CompoundName set(int i, String name) { if (get(i).equals(name)) return this; List<String> newCompounds = new ArrayList<>(compounds); newCompounds.set(i, name); return new CompoundName(newCompounds); } /** * Returns whether this name has more than one element */ public boolean isCompound() { return compounds.size() > 1; } public boolean isEmpty() { return compounds.isEmpty(); } /** * Returns whether the given name is a prefix of this. * Prefixes are taken on the component, not character level, so * "a" is a prefix of "a.b", but not a prefix of "ax.b */ public boolean hasPrefix(CompoundName prefix) { if (prefix.size() > this.size()) return false; int prefixLength = prefix.name.length(); if (prefixLength == 0) return true; if (name.length() > prefixLength && name.charAt(prefixLength) != '.') return false; return name.startsWith(prefix.name); } /** * Returns an immutable list of the components of this */ public List<String> asList() { return compounds; } @Override public int hashCode() { return hashCode; } @Override public boolean equals(Object arg) { if (arg == this) return true; return (arg instanceof CompoundName o) && name.equals(o.name); } /** * Returns the string representation of this - all the name components in order separated by dots. 
*/ @Override public String toString() { return name; } public String getLowerCasedName() { return lowerCasedName; } private static String toCompoundString(String [] compounds) { int all = compounds.length; for (String compound : compounds) all += compound.length(); StringBuilder b = new StringBuilder(all); for (String compound : compounds) b.append(compound).append("."); return b.length()==0 ? "" : b.substring(0, b.length()-1); } /** * Creates a CompoundName from a string, possibly reusing from cache. * Prefer over constructing on the fly. **/ public static CompoundName from(String name) { CompoundName found = cache.get(name); if (found != null) return found; if (cache.size() < MAX_CACHE_SIZE) { CompoundName compound = new CompoundName(name, true); cache.put(name, compound); return compound; } return new CompoundName(name, false); } private static class ImmutableArrayList extends AbstractList<String> { private final String [] array; ImmutableArrayList(String [] array) { this.array = array; } @Override public String get(int index) { return array[index]; } @Override public int size() { return array.length; } @Override public int hashCode() { int hashCode = 0; for (String s : array) { hashCode = hashCode ^ s.hashCode(); } return hashCode; } } }
Fixed
private static void warnStreamingGramMatching(String cluster, Schema schema, DeployLogger logger) { for (ImmutableSDField sd : schema.allConcreteFields()) { if (sd.getMatching().getType() == MatchType.GRAM) { logger.logApplicationPackage(Level.WARNING, "For search cluster '" + cluster + "', schema '" + schema.getName() + "', SD field '" + sd.getName() + "': n-gram matching is not supported for streaming search."); } } }
logger.logApplicationPackage(Level.WARNING, "For search cluster '" + cluster + "', schema '" + schema.getName() +
private static void warnStreamingGramMatching(String cluster, Schema schema, DeployLogger logger) { for (ImmutableSDField sd : schema.allConcreteFields()) { if (sd.getMatching().getType() == MatchType.GRAM) { logger.logApplicationPackage(Level.WARNING, "For search cluster '" + cluster + "', streaming schema '" + schema.getName() + "', SD field '" + sd.getName() + "': n-gram matching is not supported for streaming search."); } } }
class StreamingValidator implements Validator { @Override public void validate(Context context) { List<SearchCluster> searchClusters = context.model().getSearchClusters(); for (SearchCluster cluster : searchClusters) { for (SchemaInfo schemaInfo : cluster.schemas().values()) { if (schemaInfo.getIndexMode() == SchemaInfo.IndexMode.STREAMING) { var deployLogger = context.deployState().getDeployLogger(); warnStreamingAttributes(cluster.getClusterName(), schemaInfo.fullSchema(), deployLogger); warnStreamingGramMatching(cluster.getClusterName(), schemaInfo.fullSchema(), deployLogger); failStreamingDocumentReferences(cluster.getClusterName(), cluster.getDocumentDB(schemaInfo.name()).getDerivedConfiguration(), context); } } } } /** * Warn if one or more attributes are defined in a streaming search cluster SD. */ private static void warnStreamingAttributes(String cluster, Schema schema, DeployLogger logger) { for (ImmutableSDField sd : schema.allConcreteFields()) { if (sd.doesAttributing()) { warnStreamingAttribute(cluster, schema.getName(), sd, logger); } } } private static void warnStreamingAttribute(String cluster, String schema, ImmutableSDField sd, DeployLogger logger) { if (sd.getDataType() instanceof NumericDataType) return; if (sd.getDataType() instanceof TensorDataType) { for (var fieldAttribute : sd.getAttributes().values()) { if (fieldAttribute.hnswIndexParams().isPresent()) { logger.logApplicationPackage(Level.WARNING, "For search cluster '" + cluster + "', schema '" + schema + "', SD field '" + sd.getName() + "': hnsw index is not relevant and not supported, ignoring setting"); } } return; } logger.logApplicationPackage(Level.WARNING, "For search cluster '" + cluster + "', SD field '" + sd.getName() + "': 'attribute' has same match semantics as 'index'."); } private static void failStreamingDocumentReferences(String cluster, DerivedConfiguration derived, Context context) { for (Attribute attribute : derived.getAttributeFields().attributes()) { DataType 
dataType = attribute.getDataType(); if (dataType instanceof NewDocumentReferenceDataType) { String errorMessage = String.format("For search cluster '%s', schema '%s': Attribute '%s' has type '%s'. " + "Document references and imported fields are not allowed in streaming search.", cluster, derived.getSchema().getName(), attribute.getName(), dataType.getName()); context.illegal(errorMessage); } } } }
class StreamingValidator implements Validator { @Override public void validate(Context context) { List<SearchCluster> searchClusters = context.model().getSearchClusters(); for (SearchCluster cluster : searchClusters) { for (SchemaInfo schemaInfo : cluster.schemas().values()) { if (schemaInfo.getIndexMode() == SchemaInfo.IndexMode.STREAMING) { var deployLogger = context.deployState().getDeployLogger(); warnStreamingAttributes(cluster.getClusterName(), schemaInfo.fullSchema(), deployLogger); warnStreamingGramMatching(cluster.getClusterName(), schemaInfo.fullSchema(), deployLogger); failStreamingDocumentReferences(cluster.getClusterName(), cluster.getDocumentDB(schemaInfo.name()).getDerivedConfiguration(), context); } } } } /** * Warn if one or more attributes are defined in a streaming search cluster SD. */ private static void warnStreamingAttributes(String cluster, Schema schema, DeployLogger logger) { for (ImmutableSDField sd : schema.allConcreteFields()) { if (sd.doesAttributing()) { warnStreamingAttribute(cluster, schema.getName(), sd, logger); } } } private static void warnStreamingAttribute(String cluster, String schema, ImmutableSDField sd, DeployLogger logger) { if (sd.getDataType() instanceof NumericDataType) return; if (sd.getDataType() instanceof TensorDataType) { for (var fieldAttribute : sd.getAttributes().values()) { if (fieldAttribute.hnswIndexParams().isPresent()) { logger.logApplicationPackage(Level.WARNING, "For search cluster '" + cluster + "', streaming schema '" + schema + "', SD field '" + sd.getName() + "': hnsw index is not relevant and not supported, ignoring setting"); } } return; } } private static void failStreamingDocumentReferences(String cluster, DerivedConfiguration derived, Context context) { for (Attribute attribute : derived.getAttributeFields().attributes()) { DataType dataType = attribute.getDataType(); if (dataType instanceof NewDocumentReferenceDataType) { String errorMessage = String.format("For search cluster '%s', streaming 
schema '%s': Attribute '%s' has type '%s'. " + "Document references and imported fields are not allowed in streaming search.", cluster, derived.getSchema().getName(), attribute.getName(), dataType.getName()); context.illegal(errorMessage); } } } }
I think we need to ponder and discuss the desired semantics for this one a bit, probably when @hakonhall is back
private Result checkAllNodesAreUp(ClusterState clusterState) { if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState(); for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) { State wantedState = storageNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another storage node wants state " + wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another storage node has state " + state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } } for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) { State wantedState = distributorNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another distributor has state " + state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } } return allowSettingOfWantedState(); }
if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState();
private Result checkAllNodesAreUp(ClusterState clusterState) { for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) { State wantedState = storageNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another storage node wants state " + wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another storage node has state " + state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } } for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) { State wantedState = distributorNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another distributor has state " + state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } } return allowSettingOfWantedState(); }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
Discussed with @vekterli : The original check can stand. `checkAllNodesAreUp()` is only called if all nodes are supposed to be up and as a sanity-check before checking for redundancy. Otherwise, if another node is already set in MAINTENANCE it should be disallowed if too many are already set (`anotherNodeInAnotherGroupHasWantedState()`) or allowed (`anotherNodeInGroupAlreadyAllowed()`).
private Result checkAllNodesAreUp(ClusterState clusterState) { if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState(); for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) { State wantedState = storageNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another storage node wants state " + wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another storage node has state " + state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } } for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) { State wantedState = distributorNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another distributor has state " + state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } } return allowSettingOfWantedState(); }
if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState();
private Result checkAllNodesAreUp(ClusterState clusterState) { for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) { State wantedState = storageNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another storage node wants state " + wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another storage node has state " + state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } } for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) { State wantedState = distributorNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another distributor has state " + state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } } return allowSettingOfWantedState(); }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
WIll fix
private static ClusterState defaultAllUpClusterState(int nodeCount) { return clusterState(String.format("version:%d distributor:" + nodeCount + " storage:" + nodeCount, currentClusterStateVersion)); }
return clusterState(String.format("version:%d distributor:" + nodeCount + " storage:" + nodeCount, currentClusterStateVersion));
private static ClusterState defaultAllUpClusterState(int nodeCount) { return clusterState(String.format("version:%d distributor:%d storage:%d", currentClusterStateVersion, nodeCount , nodeCount)); }
class NodeStateChangeCheckerTest { private static final int requiredRedundancy = 4; private static final int currentClusterStateVersion = 2; private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1); private static final Node nodeStorage = new Node(STORAGE, 1); private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP); private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(MAINTENANCE, "Orchestrator"); private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer"); private static NodeState createNodeState(State state, String description) { return new NodeState(STORAGE, state).setDescription(description); } private static ClusterState clusterState(String state) { try { return new ClusterState(state); } catch (ParseException e) { throw new RuntimeException(e); } } private static ClusterState defaultAllUpClusterState() { return defaultAllUpClusterState(4); } private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) { return new NodeStateChangeChecker(cluster, false); } private ContentCluster createCluster(int nodeCount) { return createCluster(nodeCount, 1); } private ContentCluster createCluster(int nodeCount, int groupCount) { Collection<ConfiguredNode> nodes = createNodes(nodeCount); Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount)); return new ContentCluster("Clustername", nodes, distribution); } private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) { return "{\n" + " \"cluster-state-version\": 2,\n" + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + replicationfactor1 + "\n" + " },\n" + " {\n" + " \"node-index\": 1,\n" + " \"min-current-replication-factor\": " + replicationfactor2 + "\n" + " },\n" + " {\n" + " \"node-index\": 2,\n" + " \"min-current-replication-factor\": " + replicationfactor3 
+ "\n" + " },\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]\n" + " }\n" + "}\n"; } private void markAllNodesAsReportingStateUp(ContentCluster cluster) { final ClusterInfo clusterInfo = cluster.clusterInfo(); final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size(); for (int i = 0; i < configuredNodeCount; i++) { clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0); } } @Test void testCanUpgradeForce() { var nodeStateChangeChecker = createChangeChecker(createCluster(1)); NodeState newState = new NodeState(STORAGE, INITIALIZING); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), FORCE, UP_NODE_STATE, newState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDeniedInMoratorium() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason()); } @Test void testUnknownStorageNode() { ContentCluster cluster = createCluster(4); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Unknown node storage.10", result.getReason()); } @Test 
void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); setStorageNodeWantedStateToMaintenance(cluster, 0); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testMaintenanceAllowedFor2Of4Groups() { Collection<ConfiguredNode> nodes = createNodes(4); StorDistributionConfig config = createDistributionConfig(4, 4); int maxNumberOfGroupsAllowedToBeDown = 2; var cluster = new ContentCluster("Clustername", nodes, new Distribution(config), maxNumberOfGroupsAllowedToBeDown); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); var nodeStateChangeChecker = createChangeChecker(cluster); { int nodeIndex = 0; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, defaultAllUpClusterState()); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:4 .0.s:d storage:4 .0.s:m", currentClusterStateVersion)); int nodeIndex = 1; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:4 storage:4 .0.s:m .1.s:m", currentClusterStateVersion)); int nodeIndex = 2; Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, 
MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.toString()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Nodes in 2 groups are already down, cannot take down another node", result.getReason()); } } @Test void testMaintenanceAllowedFor2Of4Groups8Nodes() { Collection<ConfiguredNode> nodes = createNodes(8); StorDistributionConfig config = createDistributionConfig(8, 4); int maxNumberOfGroupsAllowedToBeDown = 2; var cluster = new ContentCluster("Clustername", nodes, new Distribution(config), maxNumberOfGroupsAllowedToBeDown); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); var nodeStateChangeChecker = createChangeChecker(cluster); { ClusterState clusterState = defaultAllUpClusterState(8); int nodeIndex = 0; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 .0.s:d storage:8 .0.s:m", currentClusterStateVersion)); int nodeIndex = 1; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m", currentClusterStateVersion)); int nodeIndex = 2; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .2.s:m", currentClusterStateVersion)); int nodeIndex = 4; Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.toString()); 
assertFalse(result.wantedStateAlreadySet()); assertEquals("Nodes in 2 groups are already down, cannot take down another node", result.getReason()); } { ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .2.s:m", currentClusterStateVersion)); int nodeIndex = 3; checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState); setStorageNodeWantedStateToMaintenance(cluster, nodeIndex); } } @Test void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended() { ContentCluster cluster = createCluster(4); setDistributorNodeWantedState(cluster, 0, new NodeState(DISTRIBUTOR, DOWN), "Orchestrator"); var nodeStateChangeChecker = createChangeChecker(cluster); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one node can have a wanted state when result.getReason()); } @Test void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown() { ContentCluster cluster = createCluster(4, 2); setDistributorNodeWantedState(cluster, 0, new NodeState(DISTRIBUTOR, DOWN), "Orchestrator"); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other distributor 
0 in group 0 has wanted state Down", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed(), result.getReason()); assertEquals("Another distributor wants state DOWN: 0", result.getReason()); } } @Test void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended() { ContentCluster cluster = createCluster(4, 2); setStorageNodeWantedState(cluster, 0, new NodeState(STORAGE, MAINTENANCE), "Orchestrator"); var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false); ClusterState clusterStateWith0InMaintenance = clusterState(String.format( "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion)); { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason()); } { Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed(), result.getReason()); assertFalse(result.wantedStateAlreadySet()); } } @Test void testSafeSetStateDistributors() { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1)); Result result = nodeStateChangeChecker.evaluateTransition( nodeDistributor, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes")); } @Test void 
testCanUpgradeSafeMissingStorage() { ContentCluster cluster = createCluster(4); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, DOWN), 0); ClusterState clusterStateWith3Down = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); var nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterStateWith3Down, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Another storage node has state DOWN: 3", result.getReason()); } @Test void testCanUpgradeStorageSafeYes() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpFailsIfReportedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .%d.s:d", currentClusterStateVersion, nodeStorage.getIndex())); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, stateWithNodeDown, SAFE, MAINTENANCE_NODE_STATE, UP_NODE_STATE); 
assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanSetUpEvenIfOldWantedStateIsDown() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, new NodeState(STORAGE, DOWN), UP_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeStorageSafeNo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4", result.getReason()); } @Test void testCanUpgradeIfMissingMinReplicationFactor() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6))); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeIfStorageNodeMissingFromNodeInfo() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); String hostInfo = "{\n" + " \"cluster-state-version\": 2,\n" + 
" \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 0,\n" + " \"min-current-replication-factor\": " + requiredRedundancy + "\n" + " }\n" + " ]\n" + " }\n" + "}\n"; setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo)); Result result = nodeStateChangeChecker.evaluateTransition( new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testMissingDistributorState() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason()); } private Result transitionToSameState(State state, String oldDescription, String newDescription) { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); NodeState currentNodeState = createNodeState(state, oldDescription); NodeState newNodeState = createNodeState(state, newDescription); return nodeStateChangeChecker.evaluateTransition( nodeStorage, defaultAllUpClusterState(), SAFE, currentNodeState, newNodeState); } private Result transitionToSameState(String oldDescription, String newDescription) { return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription); } @Test void testSettingUpWhenUpCausesAlreadySet() { Result result = transitionToSameState(UP, "foo", "bar"); assertTrue(result.wantedStateAlreadySet()); } @Test void testSettingAlreadySetState() { Result result = 
transitionToSameState("foo", "foo"); assertFalse(result.settingWantedStateIsAllowed()); assertTrue(result.wantedStateAlreadySet()); } @Test void testDifferentDescriptionImpliesDenied() { Result result = transitionToSameState("foo", "bar"); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) { NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, UP), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6))); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, UP), 0); } return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); } private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) { for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) { State state = UP; cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0); cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo); cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0); } } private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) { return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); } @Test void testCanUpgradeWhenAllUp() { Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenAllUpOrRetired() 
{ Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4), defaultAllUpClusterState()); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCanUpgradeWhenStorageIsDown() { ClusterState clusterState = defaultAllUpClusterState(); var storageNodeIndex = nodeStorage.getIndex(); ContentCluster cluster = createCluster(4); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testCannotUpgradeWhenOtherStorageIsDown() { int otherIndex = 2; assertNotEquals(nodeStorage.getIndex(), otherIndex); ContentCluster cluster = createCluster(4); ClusterState clusterState = defaultAllUpClusterState(); NodeState downNodeState = new NodeState(STORAGE, DOWN); cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */); clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState); Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertTrue(result.getReason().contains("Another storage node has state DOWN: 2")); } @Test void testNodeRatioRequirementConsidersGeneratedNodeStates() { ContentCluster cluster = createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); markAllNodesAsReportingStateUp(cluster); ClusterState stateWithNodeDown = clusterState(String.format( "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion)); Result result = nodeStateChangeChecker.evaluateTransition( nodeStorage, 
stateWithNodeDown, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } @Test void testDownDisallowedByNonRetiredState() { Result result = evaluateDownTransition( defaultAllUpClusterState(), UP, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason()); } @Test void testDownDisallowedByBuckets() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("The storage node manages 1 buckets", result.getReason()); } @Test void testDownDisallowedByReportedState() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason()); } @Test void testDownDisallowedByVersionMismatch() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0); assertFalse(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1", result.getReason()); } @Test void testAllowedToSetDown() { Result result = evaluateDownTransition( retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); } private Result evaluateDownTransition(ClusterState clusterState, State reportedState, int hostInfoClusterStateVersion, int lastAlldisksBuckets) { ContentCluster cluster 
= createCluster(4); NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster); StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex()); nodeInfo.setReportedState(new NodeState(STORAGE, reportedState), 0); nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets)); return nodeStateChangeChecker.evaluateTransition( nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE); } private ClusterState retiredClusterStateSuffix() { return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r", currentClusterStateVersion, nodeStorage.getIndex())); } private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) { return HostInfo.createHostInfo(String.format("{\n" + " \"metrics\":\n" + " {\n" + " \"snapshot\":\n" + " {\n" + " \"from\":1494940706,\n" + " \"to\":1494940766\n" + " },\n" + " \"values\":\n" + " [\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.buckets\",\n" + " \"description\":\"buckets managed\",\n" + " \"values\":\n" + " {\n" + " \"average\":262144.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":262144,\n" + " \"max\":262144,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.alldisks.docs\",\n" + " \"description\":\"documents stored\",\n" + " \"values\":\n" + " {\n" + " \"average\":154689587.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":154689587,\n" + " \"max\":154689587,\n" + " \"last\":154689587\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":0.0,\n" + " \"sum\":0.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":0,\n" + " \"max\":0,\n" + " \"last\":0\n" + " 
},\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"global\"\n" + " }\n" + " },\n" + " {\n" + " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" + " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" + " \"values\":\n" + " {\n" + " \"average\":129.0,\n" + " \"sum\":129.0,\n" + " \"count\":1,\n" + " \"rate\":0.016666,\n" + " \"min\":129,\n" + " \"max\":129,\n" + " \"last\":%d\n" + " },\n" + " \"dimensions\":\n" + " {\n" + " \"bucketSpace\":\"default\"\n" + " }\n" + " }\n" + " ]\n" + " },\n" + " \"cluster-state-version\":%d\n" + "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion)); } private List<ConfiguredNode> createNodes(int count) { List<ConfiguredNode> nodes = new ArrayList<>(); for (int i = 0; i < count; i++) nodes.add(new ConfiguredNode(i, false)); return nodes; } private StorDistributionConfig createDistributionConfig(int nodes) { var configBuilder = new StorDistributionConfig.Builder() .ready_copies(requiredRedundancy) .redundancy(requiredRedundancy) .initial_redundancy(requiredRedundancy); var groupBuilder = new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") .capacity(nodes); int nodeIndex = 0; for (int j = 0; j < nodes; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); return configBuilder.build(); } private StorDistributionConfig createDistributionConfig(int nodes, int groups) { if (groups == 1) return createDistributionConfig(nodes); if (nodes % groups != 0) throw new IllegalArgumentException("Cannot have " + groups + " groups with an odd number of nodes: " + nodes); int nodesPerGroup = nodes / groups; var configBuilder = new StorDistributionConfig.Builder() .active_per_leaf_group(true) .ready_copies(groups) .redundancy(groups) .initial_redundancy(groups); configBuilder.group(new StorDistributionConfig.Group.Builder() .index("invalid") .name("invalid") 
.capacity(nodes) .partitions("1|*")); int nodeIndex = 0; for (int i = 0; i < groups; ++i) { var groupBuilder = new StorDistributionConfig.Group.Builder() .index(String.valueOf(i)) .name(String.valueOf(i)) .capacity(nodesPerGroup) .partitions(""); for (int j = 0; j < nodesPerGroup; ++j, ++nodeIndex) { groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder() .index(nodeIndex)); } configBuilder.group(groupBuilder); } return configBuilder.build(); } private void checkSettingToMaintenanceIsAllowed(int nodeIndex, NodeStateChangeChecker nodeStateChangeChecker, ClusterState clusterState) { Node node = new Node(STORAGE, nodeIndex); Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE); assertTrue(result.settingWantedStateIsAllowed()); assertFalse(result.wantedStateAlreadySet()); assertEquals("Preconditions fulfilled and new state different", result.getReason()); } private void setStorageNodeWantedStateToMaintenance(ContentCluster cluster, int nodeIndex) { setStorageNodeWantedStateToMaintenance(cluster, nodeIndex, "Orchestrator"); } private void setStorageNodeWantedStateToMaintenance(ContentCluster cluster, int nodeIndex, String description) { cluster.clusterInfo().getStorageNodeInfo(nodeIndex).setWantedState(MAINTENANCE_NODE_STATE.setDescription(description)); } private void setStorageNodeWantedState(ContentCluster cluster, int nodeIndex, NodeState state, String description) { cluster.clusterInfo().getStorageNodeInfo(nodeIndex).setWantedState(state.setDescription(description)); } private void setDistributorNodeWantedState(ContentCluster cluster, int nodeIndex, NodeState state, String description) { cluster.clusterInfo().getDistributorNodeInfo(nodeIndex).setWantedState(state.setDescription(description)); } }
/**
 * Tests for NodeStateChangeChecker.evaluateTransition: verifies when a wanted-state change
 * (UP / MAINTENANCE / DOWN) on a storage or distributor node is allowed in SAFE and FORCE
 * modes, for flat and grouped clusters, parameterized over
 * max-number-of-groups-allowed-to-be-down = -1 (feature off) and 1.
 *
 * NOTE(review): two assertEquals literals below ("At most one node can have a wanted state
 * when ...") are visibly truncated (unterminated string) — presumably mangled by extraction;
 * restore the expected messages from the upstream source before compiling.
 * NOTE(review): defaultAllUpClusterState(int) is referenced but not visible in this chunk —
 * assumed defined elsewhere in the original file; confirm.
 */
class NodeStateChangeCheckerTest {

    private static final int requiredRedundancy = 4;
    private static final int currentClusterStateVersion = 2;
    private static final Node nodeDistributor = new Node(DISTRIBUTOR, 1);
    private static final Node nodeStorage = new Node(STORAGE, 1);
    private static final NodeState UP_NODE_STATE = new NodeState(STORAGE, UP);
    private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(MAINTENANCE, "Orchestrator");
    private static final NodeState DOWN_NODE_STATE = createNodeState(DOWN, "RetireEarlyExpirer");

    /** Storage-node state with the given state and description. */
    private static NodeState createNodeState(State state, String description) {
        return new NodeState(STORAGE, state).setDescription(description);
    }

    /** Parses a cluster state from its string form. */
    private static ClusterState clusterState(String state) {
        return ClusterState.stateFromString(state);
    }

    /** All-up cluster state for the default 4-node cluster. */
    private static ClusterState defaultAllUpClusterState() {
        return defaultAllUpClusterState(4);
    }

    /** Checker under test, with the bootstrap moratorium flag set to false. */
    private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) {
        return new NodeStateChangeChecker(cluster, false);
    }

    /** Flat (single-group) cluster with the given node count. */
    private ContentCluster createCluster(int nodeCount, int maxNumberOfGroupsAllowedToBeDown) {
        return createCluster(nodeCount, 1, maxNumberOfGroupsAllowedToBeDown);
    }

    private ContentCluster createCluster(int nodeCount, int groupCount, int maxNumberOfGroupsAllowedToBeDown) {
        List<ConfiguredNode> nodes = createNodes(nodeCount);
        Distribution distribution = new Distribution(createDistributionConfig(nodeCount, groupCount));
        return new ContentCluster("Clustername", nodes, distribution, maxNumberOfGroupsAllowedToBeDown);
    }

    /** Distributor host-info JSON reporting min replication factors for nodes 0-2; node 3 reports none. */
    private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) {
        return "{\n" +
               " \"cluster-state-version\": 2,\n" +
               " \"distributor\": {\n" +
               " \"storage-nodes\": [\n" +
               " {\n" +
               " \"node-index\": 0,\n" +
               " \"min-current-replication-factor\": " + replicationfactor1 + "\n" +
               " },\n" +
               " {\n" +
               " \"node-index\": 1,\n" +
               " \"min-current-replication-factor\": " + replicationfactor2 + "\n" +
               " },\n" +
               " {\n" +
               " \"node-index\": 2,\n" +
               " \"min-current-replication-factor\": " + replicationfactor3 + "\n" +
               " },\n" +
               " {\n" +
               " \"node-index\": 3\n" +
               " }\n" +
               " ]\n" +
               " }\n" +
               "}\n";
    }

    /** Marks every distributor and storage node as reporting UP, with default host info. */
    private void markAllNodesAsReportingStateUp(ContentCluster cluster) {
        final ClusterInfo clusterInfo = cluster.clusterInfo();
        final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size();
        for (int i = 0; i < configuredNodeCount; i++) {
            clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(DISTRIBUTOR, UP), 0);
            clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
            clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(STORAGE, UP), 0);
        }
    }

    // FORCE mode bypasses all safety checks, even on a distributor.
    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanUpgradeWithForce(int maxNumberOfGroupsAllowedToBeDown) {
        var nodeStateChangeChecker = createChangeChecker(createCluster(1, maxNumberOfGroupsAllowedToBeDown));
        NodeState newState = new NodeState(STORAGE, INITIALIZING);
        Result result = nodeStateChangeChecker.evaluateTransition(
                nodeDistributor, defaultAllUpClusterState(), FORCE,
                UP_NODE_STATE, newState);
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testDeniedInMoratorium(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        // Second constructor argument true = master controller still in moratorium.
        var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, true);
        Result result = nodeStateChangeChecker.evaluateTransition(
                new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testUnknownStorageNode(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        var nodeStateChangeChecker = createChangeChecker(cluster);
        // Index 10 is outside the configured 4-node cluster.
        Result result = nodeStateChangeChecker.evaluateTransition(
                new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("Unknown node storage.10", result.getReason());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        setStorageNodeWantedStateToMaintenance(cluster, 0);
        var nodeStateChangeChecker = createChangeChecker(cluster);
        ClusterState clusterStateWith0InMaintenance = clusterState(String.format(
                "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion));
        Result result = nodeStateChangeChecker.evaluateTransition(
                new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        // NOTE(review): expected-message literal truncated in extraction — see class Javadoc.
        assertEquals("At most one node can have a wanted state when result.getReason());
    }

    // Grouped cluster, 4 groups of 1 node; with max 2 groups allowed down, the third
    // group must be refused.
    @Test
    void testMaintenanceAllowedFor2Of4Groups() {
        Collection<ConfiguredNode> nodes = createNodes(4);
        StorDistributionConfig config = createDistributionConfig(4, 4);
        int maxNumberOfGroupsAllowedToBeDown = 2;
        var cluster = new ContentCluster("Clustername", nodes, new Distribution(config), maxNumberOfGroupsAllowedToBeDown);
        setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
        var nodeStateChangeChecker = createChangeChecker(cluster);
        {
            int nodeIndex = 0;
            checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, defaultAllUpClusterState());
            setStorageNodeWantedStateToMaintenance(cluster, nodeIndex);
        }
        {
            ClusterState clusterState = clusterState(String.format("version:%d distributor:4 .0.s:d storage:4 .0.s:m", currentClusterStateVersion));
            int nodeIndex = 1;
            checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState);
            setStorageNodeWantedStateToMaintenance(cluster, nodeIndex);
        }
        {
            ClusterState clusterState = clusterState(String.format("version:%d distributor:4 storage:4 .0.s:m .1.s:m .2.s:d", currentClusterStateVersion));
            int nodeIndex = 2;
            cluster.clusterInfo().getStorageNodeInfo(nodeIndex).setReportedState(new NodeState(STORAGE, DOWN), 0);
            Node node = new Node(STORAGE, nodeIndex);
            Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
            assertFalse(result.settingWantedStateIsAllowed(), result.toString());
            assertFalse(result.wantedStateAlreadySet());
            assertEquals("At most 2 groups can have wanted state: [0, 1, 2]", result.getReason());
        }
        {
            ClusterState clusterState = clusterState(String.format("version:%d distributor:4 storage:4 .0.s:m .1.s:m", currentClusterStateVersion));
            int nodeIndex = 2;
            Node node = new Node(STORAGE, nodeIndex);
            Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
            assertFalse(result.settingWantedStateIsAllowed(), result.toString());
            assertFalse(result.wantedStateAlreadySet());
            assertEquals("At most 2 groups can have wanted state: [0, 1]", result.getReason());
        }
    }

    // Grouped cluster, 4 groups of 2 nodes: a second node in an already-down group is
    // allowed, a node in a third group is not.
    @Test
    void testMaintenanceAllowedFor2Of4Groups8Nodes() {
        Collection<ConfiguredNode> nodes = createNodes(8);
        StorDistributionConfig config = createDistributionConfig(8, 4);
        int maxNumberOfGroupsAllowedToBeDown = 2;
        var cluster = new ContentCluster("Clustername", nodes, new Distribution(config), maxNumberOfGroupsAllowedToBeDown);
        setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
        var nodeStateChangeChecker = createChangeChecker(cluster);
        {
            ClusterState clusterState = defaultAllUpClusterState(8);
            int nodeIndex = 0;
            checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState);
            setStorageNodeWantedStateToMaintenance(cluster, nodeIndex);
        }
        {
            ClusterState clusterState = clusterState(String.format("version:%d distributor:8 .0.s:d storage:8 .0.s:m", currentClusterStateVersion));
            int nodeIndex = 1;
            checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState);
            setStorageNodeWantedStateToMaintenance(cluster, nodeIndex);
        }
        {
            ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m", currentClusterStateVersion));
            int nodeIndex = 2;
            checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState);
            setStorageNodeWantedStateToMaintenance(cluster, nodeIndex);
        }
        {
            ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .2.s:m", currentClusterStateVersion));
            int nodeIndex = 4;
            Node node = new Node(STORAGE, nodeIndex);
            Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
            assertFalse(result.settingWantedStateIsAllowed(), result.toString());
            assertFalse(result.wantedStateAlreadySet());
            assertEquals("At most 2 groups can have wanted state: [0, 1]", result.getReason());
        }
        {
            ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .2.s:m", currentClusterStateVersion));
            int nodeIndex = 3;
            checkSettingToMaintenanceIsAllowed(nodeIndex, nodeStateChangeChecker, clusterState);
            setStorageNodeWantedStateToMaintenance(cluster, nodeIndex);
        }
        {
            ClusterState clusterState = clusterState(String.format("version:%d distributor:8 storage:8 .0.s:m .1.s:m .3.s:m", currentClusterStateVersion));
            setStorageNodeWantedState(cluster, 3, MAINTENANCE, "Maintenance, set by operator");
            setStorageNodeWantedState(cluster, 2, UP, "");
            int nodeIndex = 2;
            Node node = new Node(STORAGE, nodeIndex);
            Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
            assertTrue(result.settingWantedStateIsAllowed(), result.toString());
            assertFalse(result.wantedStateAlreadySet());
        }
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        setDistributorNodeWantedState(cluster, 0, DOWN, "Orchestrator");
        var nodeStateChangeChecker = createChangeChecker(cluster);
        ClusterState clusterStateWith0InMaintenance = clusterState(String.format(
                "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion));
        Result result = nodeStateChangeChecker.evaluateTransition(
                new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        // NOTE(review): expected-message literal truncated in extraction — see class Javadoc.
        assertEquals("At most one node can have a wanted state when result.getReason());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, 2, maxNumberOfGroupsAllowedToBeDown);
        setDistributorNodeWantedState(cluster, 0, DOWN, "Orchestrator");
        var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false);
        ClusterState clusterStateWith0InMaintenance = clusterState(String.format(
                "version:%d distributor:4 .0.s:d storage:4", currentClusterStateVersion));
        {
            // Node in the other group: refused, but with different reasons per feature flag.
            Result result = nodeStateChangeChecker.evaluateTransition(
                    new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE,
                    UP_NODE_STATE, MAINTENANCE_NODE_STATE);
            assertFalse(result.settingWantedStateIsAllowed());
            assertFalse(result.wantedStateAlreadySet());
            if (maxNumberOfGroupsAllowedToBeDown >= 1)
                assertEquals("Wanted state already set for another node in groups: [0]", result.getReason());
            else
                assertEquals("At most one group can have wanted state: Other distributor 0 in group 0 has wanted state Down", result.getReason());
        }
        {
            // Node in the same group as the suspended distributor.
            Result result = nodeStateChangeChecker.evaluateTransition(
                    new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE,
                    UP_NODE_STATE, MAINTENANCE_NODE_STATE);
            if (maxNumberOfGroupsAllowedToBeDown >= 1) {
                assertFalse(result.settingWantedStateIsAllowed(), result.getReason());
                assertEquals("Wanted state already set for another node in groups: [0]", result.getReason());
            } else {
                assertFalse(result.settingWantedStateIsAllowed(), result.getReason());
                assertEquals("Another distributor wants state DOWN: 0", result.getReason());
            }
        }
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, 2, maxNumberOfGroupsAllowedToBeDown);
        setStorageNodeWantedState(cluster, 0, MAINTENANCE, "Orchestrator");
        var nodeStateChangeChecker = new NodeStateChangeChecker(cluster, false);
        ClusterState clusterStateWith0InMaintenance = clusterState(String.format(
                "version:%d distributor:4 storage:4 .0.s:m", currentClusterStateVersion));
        {
            Result result = nodeStateChangeChecker.evaluateTransition(
                    new Node(STORAGE, 2), clusterStateWith0InMaintenance, SAFE,
                    UP_NODE_STATE, MAINTENANCE_NODE_STATE);
            assertFalse(result.settingWantedStateIsAllowed());
            assertFalse(result.wantedStateAlreadySet());
            if (maxNumberOfGroupsAllowedToBeDown >= 1)
                assertEquals("At most 1 groups can have wanted state: [0]", result.getReason());
            else
                assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance", result.getReason());
        }
        {
            // Same group as the node already in maintenance: allowed.
            Result result = nodeStateChangeChecker.evaluateTransition(
                    new Node(STORAGE, 1), clusterStateWith0InMaintenance, SAFE,
                    UP_NODE_STATE, MAINTENANCE_NODE_STATE);
            assertTrue(result.settingWantedStateIsAllowed(), result.getReason());
            assertFalse(result.wantedStateAlreadySet());
        }
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testSafeSetStateDistributors(int maxNumberOfGroupsAllowedToBeDown) {
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(1, 1, maxNumberOfGroupsAllowedToBeDown));
        Result result = nodeStateChangeChecker.evaluateTransition(
                nodeDistributor, defaultAllUpClusterState(), SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes"));
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanUpgradeSafeMissingStorage(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
        cluster.clusterInfo().getStorageNodeInfo(3).setReportedState(new NodeState(STORAGE, DOWN), 0);
        ClusterState clusterStateWith3Down = clusterState(String.format(
                "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion));
        var nodeStateChangeChecker = createChangeChecker(cluster);
        Result result = nodeStateChangeChecker.evaluateTransition(
                nodeStorage, clusterStateWith3Down, SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("Another storage node has state DOWN: 3", result.getReason());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanUpgradeStorageSafeYes(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4, 1, maxNumberOfGroupsAllowedToBeDown), defaultAllUpClusterState());
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testSetUpFailsIfReportedIsDown(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        Result result = nodeStateChangeChecker.evaluateTransition(
                nodeStorage, defaultAllUpClusterState(), SAFE,
                MAINTENANCE_NODE_STATE, UP_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        markAllNodesAsReportingStateUp(cluster);
        ClusterState stateWithNodeDown = clusterState(String.format(
                "version:%d distributor:4 storage:4 .%d.s:d",
                currentClusterStateVersion, nodeStorage.getIndex()));
        Result result = nodeStateChangeChecker.evaluateTransition(
                nodeStorage, stateWithNodeDown, SAFE,
                MAINTENANCE_NODE_STATE, UP_NODE_STATE);
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanSetUpEvenIfOldWantedStateIsDown(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
        Result result = nodeStateChangeChecker.evaluateTransition(
                nodeStorage, defaultAllUpClusterState(), SAFE,
                new NodeState(STORAGE, DOWN), UP_NODE_STATE);
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    // Min replication factor 3 reported for node 1 is below requiredRedundancy (4).
    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanUpgradeStorageSafeNo(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
        Result result = nodeStateChangeChecker.evaluateTransition(
                nodeStorage, defaultAllUpClusterState(), SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4",
                result.getReason());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanUpgradeIfMissingMinReplicationFactor(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
        // Node 3 reports no min-current-replication-factor, so no redundancy veto applies.
        Result result = nodeStateChangeChecker.evaluateTransition(
                new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanUpgradeIfStorageNodeMissingFromNodeInfo(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        String hostInfo = "{\n" +
                " \"cluster-state-version\": 2,\n" +
                " \"distributor\": {\n" +
                " \"storage-nodes\": [\n" +
                " {\n" +
                " \"node-index\": 0,\n" +
                " \"min-current-replication-factor\": " + requiredRedundancy + "\n" +
                " }\n" +
                " ]\n" +
                " }\n" +
                "}\n";
        setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo));
        Result result = nodeStateChangeChecker.evaluateTransition(
                new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testMissingDistributorState(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(STORAGE, UP), 0);
        Result result = nodeStateChangeChecker.evaluateTransition(
                nodeStorage, defaultAllUpClusterState(), SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason());
    }

    /** Evaluates a transition whose old and new states are the same state (descriptions may differ). */
    private Result transitionToSameState(State state, String oldDescription, String newDescription, int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        NodeState currentNodeState = createNodeState(state, oldDescription);
        NodeState newNodeState = createNodeState(state, newDescription);
        return nodeStateChangeChecker.evaluateTransition(
                nodeStorage, defaultAllUpClusterState(), SAFE,
                currentNodeState, newNodeState);
    }

    private Result transitionToSameState(String oldDescription, String newDescription, int maxNumberOfGroupsAllowedToBeDown) {
        return transitionToSameState(MAINTENANCE, oldDescription, newDescription, maxNumberOfGroupsAllowedToBeDown);
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testSettingUpWhenUpCausesAlreadySet(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = transitionToSameState(UP, "foo", "bar", maxNumberOfGroupsAllowedToBeDown);
        assertTrue(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testSettingAlreadySetState(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = transitionToSameState("foo", "foo", maxNumberOfGroupsAllowedToBeDown);
        assertFalse(result.settingWantedStateIsAllowed());
        assertTrue(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testDifferentDescriptionImpliesDenied(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = transitionToSameState("foo", "bar", maxNumberOfGroupsAllowedToBeDown);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    /** Reports every node UP, then evaluates moving nodeStorage to MAINTENANCE under the given state. */
    private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) {
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) {
            cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, UP), 0);
            cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
            cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, UP), 0);
        }
        return nodeStateChangeChecker.evaluateTransition(
                nodeStorage, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
    }

    private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) {
        for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) {
            State state = UP;
            cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(DISTRIBUTOR, state), 0);
            cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo);
            cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(STORAGE, state), 0);
        }
    }

    // Alias: any "down" node comes from the cluster state passed in by the caller.
    private Result transitionToMaintenanceWithNoStorageNodesDown(ContentCluster cluster, ClusterState clusterState) {
        return transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState);
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanUpgradeWhenAllUp(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4, maxNumberOfGroupsAllowedToBeDown), defaultAllUpClusterState());
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanUpgradeWhenAllUpOrRetired(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4, maxNumberOfGroupsAllowedToBeDown), defaultAllUpClusterState());
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCanUpgradeWhenStorageIsDown(int maxNumberOfGroupsAllowedToBeDown) {
        ClusterState clusterState = defaultAllUpClusterState();
        var storageNodeIndex = nodeStorage.getIndex();
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeState downNodeState = new NodeState(STORAGE, DOWN);
        cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */);
        clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState);
        Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState);
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testCannotUpgradeWhenOtherStorageIsDown(int maxNumberOfGroupsAllowedToBeDown) {
        int otherIndex = 2;
        assertNotEquals(nodeStorage.getIndex(), otherIndex);
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        ClusterState clusterState = defaultAllUpClusterState();
        NodeState downNodeState = new NodeState(STORAGE, DOWN);
        cluster.clusterInfo().getStorageNodeInfo(otherIndex).setReportedState(downNodeState, 4 /* time */);
        clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState);
        Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertTrue(result.getReason().contains("Another storage node has state DOWN: 2"));
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testNodeRatioRequirementConsidersGeneratedNodeStates(int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        markAllNodesAsReportingStateUp(cluster);
        ClusterState stateWithNodeDown = clusterState(String.format(
                "version:%d distributor:4 storage:4 .3.s:d", currentClusterStateVersion));
        Result result = nodeStateChangeChecker.evaluateTransition(
                nodeStorage, stateWithNodeDown, SAFE,
                UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testDownDisallowedByNonRetiredState(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = evaluateDownTransition(
                defaultAllUpClusterState(), UP, currentClusterStateVersion, 0, maxNumberOfGroupsAllowedToBeDown);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testDownDisallowedByBuckets(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = evaluateDownTransition(
                retiredClusterStateSuffix(), UP, currentClusterStateVersion, 1, maxNumberOfGroupsAllowedToBeDown);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("The storage node manages 1 buckets", result.getReason());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testDownDisallowedByReportedState(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = evaluateDownTransition(
                retiredClusterStateSuffix(), INITIALIZING, currentClusterStateVersion, 0, maxNumberOfGroupsAllowedToBeDown);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testDownDisallowedByVersionMismatch(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = evaluateDownTransition(
                retiredClusterStateSuffix(), UP, currentClusterStateVersion - 1, 0, maxNumberOfGroupsAllowedToBeDown);
        assertFalse(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1",
                result.getReason());
    }

    @ParameterizedTest
    @ValueSource(ints = {-1, 1})
    void testAllowedToSetDown(int maxNumberOfGroupsAllowedToBeDown) {
        Result result = evaluateDownTransition(
                retiredClusterStateSuffix(), UP, currentClusterStateVersion, 0, maxNumberOfGroupsAllowedToBeDown);
        assertTrue(result.settingWantedStateIsAllowed());
        assertFalse(result.wantedStateAlreadySet());
    }

    /** Evaluates UP -> DOWN for nodeStorage with the given reported state and host-info metrics. */
    private Result evaluateDownTransition(ClusterState clusterState, State reportedState,
                                          int hostInfoClusterStateVersion, int lastAlldisksBuckets,
                                          int maxNumberOfGroupsAllowedToBeDown) {
        ContentCluster cluster = createCluster(4, maxNumberOfGroupsAllowedToBeDown);
        NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
        StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex());
        nodeInfo.setReportedState(new NodeState(STORAGE, reportedState), 0);
        nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets));
        return nodeStateChangeChecker.evaluateTransition(
                nodeStorage, clusterState, SAFE, UP_NODE_STATE, DOWN_NODE_STATE);
    }

    /** Cluster state in which nodeStorage is RETIRED. */
    private ClusterState retiredClusterStateSuffix() {
        return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r",
                currentClusterStateVersion, nodeStorage.getIndex()));
    }

    /** Host-info JSON with bucket metrics; lastAlldisksBuckets fills both "last" placeholders. */
    private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) {
        return HostInfo.createHostInfo(String.format("{\n" +
                " \"metrics\":\n" +
                " {\n" +
                " \"snapshot\":\n" +
                " {\n" +
                " \"from\":1494940706,\n" +
                " \"to\":1494940766\n" +
                " },\n" +
                " \"values\":\n" +
                " [\n" +
                " {\n" +
                " \"name\":\"vds.datastored.alldisks.buckets\",\n" +
                " \"description\":\"buckets managed\",\n" +
                " \"values\":\n" +
                " {\n" +
                " \"average\":262144.0,\n" +
                " \"count\":1,\n" +
                " \"rate\":0.016666,\n" +
                " \"min\":262144,\n" +
                " \"max\":262144,\n" +
                " \"last\":%d\n" +
                " },\n" +
                " \"dimensions\":\n" +
                " {\n" +
                " }\n" +
                " },\n" +
                " {\n" +
                " \"name\":\"vds.datastored.alldisks.docs\",\n" +
                " \"description\":\"documents stored\",\n" +
                " \"values\":\n" +
                " {\n" +
                " \"average\":154689587.0,\n" +
                " \"count\":1,\n" +
                " \"rate\":0.016666,\n" +
                " \"min\":154689587,\n" +
                " \"max\":154689587,\n" +
                " \"last\":154689587\n" +
                " },\n" +
                " \"dimensions\":\n" +
                " {\n" +
                " }\n" +
                " },\n" +
                " {\n" +
                " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" +
                " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" +
                " \"values\":\n" +
                " {\n" +
                " \"average\":0.0,\n" +
                " \"sum\":0.0,\n" +
                " \"count\":1,\n" +
                " \"rate\":0.016666,\n" +
                " \"min\":0,\n" +
                " \"max\":0,\n" +
                " \"last\":0\n" +
                " },\n" +
                " \"dimensions\":\n" +
                " {\n" +
                " \"bucketSpace\":\"global\"\n" +
                " }\n" +
                " },\n" +
                " {\n" +
                " \"name\":\"vds.datastored.bucket_space.buckets_total\",\n" +
                " \"description\":\"Total number buckets present in the bucket space (ready + not ready)\",\n" +
                " \"values\":\n" +
                " {\n" +
                " \"average\":129.0,\n" +
                " \"sum\":129.0,\n" +
                " \"count\":1,\n" +
                " \"rate\":0.016666,\n" +
                " \"min\":129,\n" +
                " \"max\":129,\n" +
                " \"last\":%d\n" +
                " },\n" +
                " \"dimensions\":\n" +
                " {\n" +
                " \"bucketSpace\":\"default\"\n" +
                " }\n" +
                " }\n" +
                " ]\n" +
                " },\n" +
                " \"cluster-state-version\":%d\n" +
                "}", lastAlldisksBuckets, lastAlldisksBuckets, clusterStateVersion));
    }

    /** count nodes with indices 0..count-1, none retired. */
    private List<ConfiguredNode> createNodes(int count) {
        List<ConfiguredNode> nodes = new ArrayList<>();
        for (int i = 0; i < count; i++)
            nodes.add(new ConfiguredNode(i, false));
        return nodes;
    }

    /** Flat distribution: one group containing all nodes, redundancy = requiredRedundancy. */
    private StorDistributionConfig createDistributionConfig(int nodes) {
        var configBuilder = new StorDistributionConfig.Builder()
                .ready_copies(requiredRedundancy)
                .redundancy(requiredRedundancy)
                .initial_redundancy(requiredRedundancy);
        var groupBuilder = new StorDistributionConfig.Group.Builder()
                .index("invalid")
                .name("invalid")
                .capacity(nodes);
        int nodeIndex = 0;
        for (int j = 0; j < nodes; ++j, ++nodeIndex) {
            groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder()
                    .index(nodeIndex));
        }
        configBuilder.group(groupBuilder);
        return configBuilder.build();
    }

    /** Hierarchical distribution: groups leaf groups of nodes/groups nodes each, one ready copy per group. */
    private StorDistributionConfig createDistributionConfig(int nodes, int groups) {
        if (groups == 1) return createDistributionConfig(nodes);
        if (nodes % groups != 0)
            throw new IllegalArgumentException("Cannot have " + groups + " groups with an odd number of nodes: " + nodes);
        int nodesPerGroup = nodes / groups;
        var configBuilder = new StorDistributionConfig.Builder()
                .active_per_leaf_group(true)
                .ready_copies(groups)
                .redundancy(groups)
                .initial_redundancy(groups);
        configBuilder.group(new StorDistributionConfig.Group.Builder()
                .index("invalid")
                .name("invalid")
                .capacity(nodes)
                .partitions("1|*"));
        int nodeIndex = 0;
        for (int i = 0; i < groups; ++i) {
            var groupBuilder = new StorDistributionConfig.Group.Builder()
                    .index(String.valueOf(i))
                    .name(String.valueOf(i))
                    .capacity(nodesPerGroup)
                    .partitions("");
            for (int j = 0; j < nodesPerGroup; ++j, ++nodeIndex) {
                groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder()
                        .index(nodeIndex));
            }
            configBuilder.group(groupBuilder);
        }
        return configBuilder.build();
    }

    /** Asserts that SAFE transition UP -> MAINTENANCE is allowed for the given storage node. */
    private void checkSettingToMaintenanceIsAllowed(int nodeIndex, NodeStateChangeChecker nodeStateChangeChecker, ClusterState clusterState) {
        Node node = new Node(STORAGE, nodeIndex);
        Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
        assertTrue(result.settingWantedStateIsAllowed(), result.toString());
        assertFalse(result.wantedStateAlreadySet());
        assertEquals("Preconditions fulfilled and new state different", result.getReason());
    }

    private void setStorageNodeWantedStateToMaintenance(ContentCluster cluster, int nodeIndex) {
        setStorageNodeWantedState(cluster, nodeIndex, MAINTENANCE, "Orchestrator");
    }

    private void setStorageNodeWantedState(ContentCluster cluster, int nodeIndex, State state, String description) {
        NodeState nodeState = new NodeState(STORAGE, state);
        cluster.clusterInfo().getStorageNodeInfo(nodeIndex).setWantedState(nodeState.setDescription(description));
    }

    private void setDistributorNodeWantedState(ContentCluster cluster, int nodeIndex, State state, String description) {
        NodeState nodeState = new NodeState(DISTRIBUTOR, state);
        cluster.clusterInfo().getDistributorNodeInfo(nodeIndex).setWantedState(nodeState.setDescription(description));
    }
}
Not sure I understand how more than one node can be set to maintenance (which is the goal when allowing more than one group to be down at a time), if that is the case. Also, this method checks both the wanted state and the actual state, so the name is a bit misleading.
/**
 * Verifies that every storage node and every distributor both wants to be and currently
 * is UP (or RETIRED). The whole precondition is waived when more than one group is
 * allowed to be down at the same time.
 *
 * @param clusterState cluster state to read each node's current state from
 * @return an allow-result when the precondition holds, otherwise a disallow-result
 *         naming the first offending node
 */
private Result checkAllNodesAreUp(ClusterState clusterState) {
    if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState();

    for (NodeInfo storage : clusterInfo.getStorageNodeInfos()) {
        State wanted = storage.getUserWantedState().getState();
        if (wanted != UP && wanted != RETIRED)
            return createDisallowed("Another storage node wants state " + wanted.toString().toUpperCase()
                                    + ": " + storage.getNodeIndex());

        State actual = clusterState.getNodeState(storage.getNode()).getState();
        if (actual != UP && actual != RETIRED)
            return createDisallowed("Another storage node has state " + actual.toString().toUpperCase()
                                    + ": " + storage.getNodeIndex());
    }

    for (NodeInfo distributor : clusterInfo.getDistributorNodeInfos()) {
        State wanted = distributor.getUserWantedState().getState();
        if (wanted != UP && wanted != RETIRED)
            return createDisallowed("Another distributor wants state " + wanted.toString().toUpperCase()
                                    + ": " + distributor.getNodeIndex());

        State actual = clusterState.getNodeState(distributor.getNode()).getState();
        if (actual != UP && actual != RETIRED)
            return createDisallowed("Another distributor has state " + actual.toString().toUpperCase()
                                    + ": " + distributor.getNodeIndex());
    }

    return allowSettingOfWantedState();
}
if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState();
/**
 * Verifies that every storage node and every distributor both wants to be and currently
 * is UP (or RETIRED).
 *
 * @param clusterState cluster state to read each node's current state from
 * @return an allow-result when all nodes are up, otherwise a disallow-result naming the
 *         first offending node
 */
private Result checkAllNodesAreUp(ClusterState clusterState) {
    for (NodeInfo storage : clusterInfo.getStorageNodeInfos()) {
        State wanted = storage.getUserWantedState().getState();
        if (wanted != UP && wanted != RETIRED)
            return createDisallowed("Another storage node wants state " + wanted.toString().toUpperCase()
                                    + ": " + storage.getNodeIndex());

        State actual = clusterState.getNodeState(storage.getNode()).getState();
        if (actual != UP && actual != RETIRED)
            return createDisallowed("Another storage node has state " + actual.toString().toUpperCase()
                                    + ": " + storage.getNodeIndex());
    }

    for (NodeInfo distributor : clusterInfo.getDistributorNodeInfos()) {
        State wanted = distributor.getUserWantedState().getState();
        if (wanted != UP && wanted != RETIRED)
            return createDisallowed("Another distributor wants state " + wanted.toString().toUpperCase()
                                    + ": " + distributor.getNodeIndex());

        State actual = clusterState.getNodeState(distributor.getNode()).getState();
        if (actual != UP && actual != RETIRED)
            return createDisallowed("Another distributor has state " + actual.toString().toUpperCase()
                                    + ": " + distributor.getNodeIndex());
    }

    return allowSettingOfWantedState();
}
/**
 * The outcome of evaluating a node state transition request: either the wanted state
 * must be set, the requested state is already in effect, or the transition is disallowed.
 */
class Result {

    public enum Action {
        MUST_SET_WANTED_STATE,
        ALREADY_SET,
        DISALLOWED
    }

    private final Action action;
    private final String reason;  // human-readable explanation returned to the caller

    private Result(Action action, String reason) {
        this.action = action;
        this.reason = reason;
    }

    /** Returns a result disallowing the transition, with the given human-readable reason. */
    public static Result createDisallowed(String reason) {
        return new Result(Action.DISALLOWED, reason);
    }

    /** Returns a result allowing the wanted state to be set. */
    public static Result allowSettingOfWantedState() {
        return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
    }

    /** Returns a result indicating the requested state is already effective. */
    public static Result createAlreadySet() {
        return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
    }

    public boolean settingWantedStateIsAllowed() {
        return action == Action.MUST_SET_WANTED_STATE;
    }

    public boolean wantedStateAlreadySet() {
        return action == Action.ALREADY_SET;
    }

    public String getReason() {
        return reason;
    }

    @Override  // was missing: toString overrides Object.toString and must be annotated
    public String toString() {
        return "action " + action + ": " + reason;
    }
}
/** Represents whether a requested node state change is allowed, redundant, or rejected. */
class Result {

    public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED }

    private final Action action;
    private final String reason;

    private Result(Action resultAction, String resultReason) {
        this.action = resultAction;
        this.reason = resultReason;
    }

    /** A rejection carrying the given explanation. */
    public static Result createDisallowed(String reason) {
        return new Result(Action.DISALLOWED, reason);
    }

    /** Permission to go ahead and set the wanted state. */
    public static Result allowSettingOfWantedState() {
        return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
    }

    /** The requested state is already in effect, so nothing needs to change. */
    public static Result createAlreadySet() {
        return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
    }

    public String getReason() { return reason; }

    public boolean settingWantedStateIsAllowed() { return Action.MUST_SET_WANTED_STATE == action; }

    public boolean wantedStateAlreadySet() { return Action.ALREADY_SET == action; }

    public String toString() { return "action " + action + ": " + reason; }
}
Discussed offline
/**
 * Checks that every storage node and distributor both wants to be and currently is UP
 * (or RETIRED), returning a disallow-result for the first node that is not. The check
 * is skipped entirely when more than one group may be down simultaneously.
 */
private Result checkAllNodesAreUp(ClusterState clusterState) {
    if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState();

    for (NodeInfo info : clusterInfo.getStorageNodeInfos()) {
        State wantedState = info.getUserWantedState().getState();
        if (wantedState != UP && wantedState != RETIRED)
            return createDisallowed("Another storage node wants state "
                    + wantedState.toString().toUpperCase() + ": " + info.getNodeIndex());

        State currentState = clusterState.getNodeState(info.getNode()).getState();
        if (currentState != UP && currentState != RETIRED)
            return createDisallowed("Another storage node has state "
                    + currentState.toString().toUpperCase() + ": " + info.getNodeIndex());
    }

    for (NodeInfo info : clusterInfo.getDistributorNodeInfos()) {
        State wantedState = info.getUserWantedState().getState();
        if (wantedState != UP && wantedState != RETIRED)
            return createDisallowed("Another distributor wants state "
                    + wantedState.toString().toUpperCase() + ": " + info.getNodeIndex());

        State currentState = clusterState.getNodeState(info.getNode()).getState();
        if (currentState != UP && currentState != RETIRED)
            return createDisallowed("Another distributor has state "
                    + currentState.toString().toUpperCase() + ": " + info.getNodeIndex());
    }

    return allowSettingOfWantedState();
}
if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState();
/**
 * Checks that every storage node and distributor both wants to be and currently is UP
 * (or RETIRED), returning a disallow-result for the first node that is not.
 */
private Result checkAllNodesAreUp(ClusterState clusterState) {
    for (NodeInfo info : clusterInfo.getStorageNodeInfos()) {
        State wantedState = info.getUserWantedState().getState();
        if (wantedState != UP && wantedState != RETIRED)
            return createDisallowed("Another storage node wants state "
                    + wantedState.toString().toUpperCase() + ": " + info.getNodeIndex());

        State currentState = clusterState.getNodeState(info.getNode()).getState();
        if (currentState != UP && currentState != RETIRED)
            return createDisallowed("Another storage node has state "
                    + currentState.toString().toUpperCase() + ": " + info.getNodeIndex());
    }

    for (NodeInfo info : clusterInfo.getDistributorNodeInfos()) {
        State wantedState = info.getUserWantedState().getState();
        if (wantedState != UP && wantedState != RETIRED)
            return createDisallowed("Another distributor wants state "
                    + wantedState.toString().toUpperCase() + ": " + info.getNodeIndex());

        State currentState = clusterState.getNodeState(info.getNode()).getState();
        if (currentState != UP && currentState != RETIRED)
            return createDisallowed("Another distributor has state "
                    + currentState.toString().toUpperCase() + ": " + info.getNodeIndex());
    }

    return allowSettingOfWantedState();
}
/**
 * The outcome of evaluating a node state transition request: either the wanted state
 * must be set, the requested state is already in effect, or the transition is disallowed.
 */
class Result {

    public enum Action {
        MUST_SET_WANTED_STATE,
        ALREADY_SET,
        DISALLOWED
    }

    private final Action action;
    private final String reason;  // human-readable explanation returned to the caller

    private Result(Action action, String reason) {
        this.action = action;
        this.reason = reason;
    }

    /** Returns a result disallowing the transition, with the given human-readable reason. */
    public static Result createDisallowed(String reason) {
        return new Result(Action.DISALLOWED, reason);
    }

    /** Returns a result allowing the wanted state to be set. */
    public static Result allowSettingOfWantedState() {
        return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
    }

    /** Returns a result indicating the requested state is already effective. */
    public static Result createAlreadySet() {
        return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
    }

    public boolean settingWantedStateIsAllowed() {
        return action == Action.MUST_SET_WANTED_STATE;
    }

    public boolean wantedStateAlreadySet() {
        return action == Action.ALREADY_SET;
    }

    public String getReason() {
        return reason;
    }

    @Override  // was missing: toString overrides Object.toString and must be annotated
    public String toString() {
        return "action " + action + ": " + reason;
    }
}
/** The outcome of evaluating a requested node state change. */
class Result {

    // The three possible verdicts for a requested state change.
    public enum Action {
        MUST_SET_WANTED_STATE,
        ALREADY_SET,
        DISALLOWED
    }

    private final Action action;
    private final String reason;  // human-readable explanation returned to the caller

    private Result(Action action, String reason) {
        this.action = action;
        this.reason = reason;
    }

    /** A verdict disallowing the change, with the given explanation. */
    public static Result createDisallowed(String reason) {
        return new Result(Action.DISALLOWED, reason);
    }

    /** A verdict allowing the wanted state to be set. */
    public static Result allowSettingOfWantedState() {
        return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
    }

    /** A verdict that the requested state is already in effect. */
    public static Result createAlreadySet() {
        return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
    }

    public boolean settingWantedStateIsAllowed() {
        return action == Action.MUST_SET_WANTED_STATE;
    }

    public boolean wantedStateAlreadySet() {
        return action == Action.ALREADY_SET;
    }

    public String getReason() {
        return reason;
    }

    public String toString() {
        return "action " + action + ": " + reason;
    }
}
This should also be caught during prepare: fail the deployment there and return a similar error message.
public NodeStateChangeChecker(ContentCluster cluster, boolean inMoratorium) { this.requiredRedundancy = cluster.getDistribution().getRedundancy(); this.groupVisiting = new HierarchicalGroupVisiting(cluster.getDistribution()); this.clusterInfo = cluster.clusterInfo(); this.inMoratorium = inMoratorium; this.maxNumberOfGroupsAllowedToBeDown = cluster.maxNumberOfGroupsAllowedToBeDown(); if ( ! groupVisiting.isHierarchical() && maxNumberOfGroupsAllowedToBeDown > 1) throw new IllegalArgumentException("Cannot have both 1 group and maxNumberOfGroupsAllowedToBeDown > 1"); }
if ( ! groupVisiting.isHierarchical() && maxNumberOfGroupsAllowedToBeDown > 1)
/**
 * Constructs the checker from the cluster's distribution config and live cluster info.
 *
 * @param cluster source of distribution, cluster info and group-down policy
 * @param inMoratorium whether the master cluster controller is still bootstrapping
 * @throws IllegalArgumentException when a flat (single-group) distribution is combined
 *         with allowing more than one group to be down
 */
public NodeStateChangeChecker(ContentCluster cluster, boolean inMoratorium) {
    this.clusterInfo = cluster.clusterInfo();
    this.inMoratorium = inMoratorium;
    this.requiredRedundancy = cluster.getDistribution().getRedundancy();
    this.groupVisiting = new HierarchicalGroupVisiting(cluster.getDistribution());
    this.maxNumberOfGroupsAllowedToBeDown = cluster.maxNumberOfGroupsAllowedToBeDown();

    if (maxNumberOfGroupsAllowedToBeDown > 1 && ! groupVisiting.isHierarchical())
        throw new IllegalArgumentException("Cannot have both 1 group and maxNumberOfGroupsAllowedToBeDown > 1");
}
/**
 * Decides whether a requested node state change (e.g. setting a storage node to
 * MAINTENANCE or DOWN under the SAFE condition) can be allowed without harming
 * cluster availability or redundancy.
 */
class NodeStateChangeChecker {

    private static final Logger log = Logger.getLogger(NodeStateChangeChecker.class.getName());
    private static final String BUCKETS_METRIC_NAME = "vds.datastored.bucket_space.buckets_total";
    private static final Map<String, String> BUCKETS_METRIC_DIMENSIONS = Map.of("bucketSpace", "default");

    private final int requiredRedundancy;                  // minimum bucket replication factor required
    private final HierarchicalGroupVisiting groupVisiting; // visits groups of a hierarchical distribution
    private final ClusterInfo clusterInfo;
    private final boolean inMoratorium;                    // true while master controller is bootstrapping
    private final int maxNumberOfGroupsAllowedToBeDown;    // -1 selects the legacy one-group-at-a-time logic

    /** The outcome of evaluating a node state transition request. */
    public static class Result {

        public enum Action {
            MUST_SET_WANTED_STATE,
            ALREADY_SET,
            DISALLOWED
        }

        private final Action action;
        private final String reason;  // human-readable explanation returned to the caller

        private Result(Action action, String reason) {
            this.action = action;
            this.reason = reason;
        }

        /** A result disallowing the transition, with the given reason. */
        public static Result createDisallowed(String reason) {
            return new Result(Action.DISALLOWED, reason);
        }

        /** A result allowing the wanted state to be set. */
        public static Result allowSettingOfWantedState() {
            return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
        }

        /** A result indicating the requested state is already effective. */
        public static Result createAlreadySet() {
            return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
        }

        public boolean settingWantedStateIsAllowed() {
            return action == Action.MUST_SET_WANTED_STATE;
        }

        public boolean wantedStateAlreadySet() {
            return action == Action.ALREADY_SET;
        }

        public String getReason() {
            return reason;
        }

        public String toString() {
            return "action " + action + ": " + reason;
        }
    }

    /**
     * Evaluates whether the given node may transition from oldWantedState to newWantedState
     * under the given condition. FORCE always passes; otherwise only SAFE is implemented,
     * and only for storage nodes.
     */
    public Result evaluateTransition(Node node, ClusterState clusterState, SetUnitStateRequest.Condition condition,
                                     NodeState oldWantedState, NodeState newWantedState) {
        if (condition == FORCE) {
            return allowSettingOfWantedState();
        }

        if (inMoratorium) {
            return createDisallowed("Master cluster controller is bootstrapping and in moratorium");
        }

        if (condition != SAFE) {
            return createDisallowed("Condition not implemented: " + condition.name());
        }

        if (node.getType() != STORAGE) {
            return createDisallowed("Safe-set of node state is only supported for storage nodes! " +
                    "Requested node type: " + node.getType().toString());
        }

        StorageNodeInfo nodeInfo = clusterInfo.getStorageNodeInfo(node.getIndex());
        if (nodeInfo == null) {
            return createDisallowed("Unknown node " + node);
        }

        // Same state and description as already wanted: nothing to do.
        if (newWantedState.getState().equals(oldWantedState.getState()) &&
            Objects.equals(newWantedState.getDescription(), oldWantedState.getDescription())) {
            return createAlreadySet();
        }

        return switch (newWantedState.getState()) {
            case UP -> canSetStateUp(nodeInfo, oldWantedState);
            case MAINTENANCE -> canSetStateMaintenanceTemporarily(nodeInfo, clusterState, newWantedState.getDescription());
            case DOWN -> canSetStateDownPermanently(nodeInfo, clusterState, newWantedState.getDescription());
            default -> createDisallowed("Destination node state unsupported in safe mode: " + newWantedState);
        };
    }

    /** Checks preconditions for permanently setting a retired, bucket-free storage node DOWN in safe mode. */
    private Result canSetStateDownPermanently(NodeInfo nodeInfo, ClusterState clusterState, String newDescription) {
        NodeState oldWantedState = nodeInfo.getUserWantedState();
        // Refuse to override a differently-described wanted state set by someone else.
        if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) {
            return createDisallowed("A conflicting wanted state is already set: " +
                    oldWantedState.getState() + ": " + oldWantedState.getDescription());
        }

        State reportedState = nodeInfo.getReportedState().getState();
        if (reportedState != UP) {
            return createDisallowed("Reported state (" + reportedState +
                    ") is not UP, so no bucket data is available");
        }

        State currentState = clusterState.getNodeState(nodeInfo.getNode()).getState();
        if (currentState != RETIRED) {
            return createDisallowed("Only retired nodes are allowed to be set to DOWN in safe mode - is " +
                    currentState);
        }

        // Require the node's host info to reflect the current cluster state version, so the
        // bucket metric below is not stale.
        HostInfo hostInfo = nodeInfo.getHostInfo();
        Integer hostInfoNodeVersion = hostInfo.getClusterStateVersionOrNull();
        int clusterControllerVersion = clusterState.getVersion();
        if (hostInfoNodeVersion == null || hostInfoNodeVersion != clusterControllerVersion) {
            return createDisallowed("Cluster controller at version " + clusterControllerVersion +
                    " got info for storage node " + nodeInfo.getNodeIndex() + " at a different version " +
                    hostInfoNodeVersion);
        }

        Optional<Metrics.Value> bucketsMetric;
        bucketsMetric = hostInfo.getMetrics().getValueAt(BUCKETS_METRIC_NAME, BUCKETS_METRIC_DIMENSIONS);
        if (bucketsMetric.isEmpty() || bucketsMetric.get().getLast() == null) {
            return createDisallowed("Missing last value of the " + BUCKETS_METRIC_NAME +
                    " metric for storage node " + nodeInfo.getNodeIndex());
        }

        // Only nodes that manage zero buckets may be taken permanently down.
        long lastBuckets = bucketsMetric.get().getLast();
        if (lastBuckets > 0) {
            return createDisallowed("The storage node manages " + lastBuckets + " buckets");
        }

        return allowSettingOfWantedState();
    }

    /** Allows setting UP only when the node already reports UP. */
    private Result canSetStateUp(NodeInfo nodeInfo, NodeState oldWantedState) {
        if (oldWantedState.getState() == UP) {
            return createAlreadySet();
        }

        if (nodeInfo.getReportedState().getState() != UP) {
            return createDisallowed("Refuse to set wanted state to UP, " +
                    "since the reported state is not UP (" +
                    nodeInfo.getReportedState().getState() + ")");
        }

        return allowSettingOfWantedState();
    }

    /** Checks preconditions for temporarily setting a storage node to MAINTENANCE in safe mode. */
    private Result canSetStateMaintenanceTemporarily(StorageNodeInfo nodeInfo, ClusterState clusterState,
                                                     String newDescription) {
        NodeState oldWantedState = nodeInfo.getUserWantedState();
        // Refuse to override a differently-described wanted state set by someone else.
        if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) {
            return createDisallowed("A conflicting wanted state is already set: " +
                    oldWantedState.getState() + ": " + oldWantedState.getDescription());
        }

        // A node that is already DOWN in the cluster state can safely be put in maintenance.
        if (clusterState.getNodeState(nodeInfo.getNode()).getState() == DOWN) {
            log.log(FINE, "node is DOWN, allow");
            return allowSettingOfWantedState();
        }

        if (maxNumberOfGroupsAllowedToBeDown == -1) {
            // Legacy behavior: at most one node/group may have a wanted state at a time.
            var otherGroupCheck = anotherNodeInAnotherGroupHasWantedState(nodeInfo);
            if (!otherGroupCheck.settingWantedStateIsAllowed()) {
                return otherGroupCheck;
            }
            if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription)) {
                return allowSettingOfWantedState();
            }
        } else {
            var result = otherNodesHaveWantedState(nodeInfo, newDescription);
            if (result.isPresent())
                return result.get();
        }

        Result allNodesAreUpCheck = checkAllNodesAreUp(clusterState);
        if (!allNodesAreUpCheck.settingWantedStateIsAllowed()) {
            log.log(FINE, "allNodesAreUpCheck: " + allNodesAreUpCheck);
            return allNodesAreUpCheck;
        }

        Result checkDistributorsResult = checkDistributors(nodeInfo.getNode(), clusterState.getVersion());
        if (!checkDistributorsResult.settingWantedStateIsAllowed()) {
            log.log(FINE, "checkDistributors: "+ checkDistributorsResult);
            return checkDistributorsResult;
        }

        return allowSettingOfWantedState();
    }

    /**
     * Returns a disallow-result if there is another node (in another group, if hierarchical)
     * that has a wanted state != UP. We disallow more than 1 suspended node/group at a time.
     */
    private Result anotherNodeInAnotherGroupHasWantedState(StorageNodeInfo nodeInfo) {
        if (groupVisiting.isHierarchical()) {
            SettableOptional<Result> anotherNodeHasWantedState = new SettableOptional<>();
            groupVisiting.visit(group -> {
                if (!groupContainsNode(group, nodeInfo.getNode())) {
                    Result result = otherNodeInGroupHasWantedState(group);
                    if (!result.settingWantedStateIsAllowed()) {
                        anotherNodeHasWantedState.set(result);
                        return false;  // stop visiting: a conflicting group was found
                    }
                }
                return true;
            });
            return anotherNodeHasWantedState.asOptional().orElseGet(Result::allowSettingOfWantedState);
        } else {
            return otherNodeHasWantedState(nodeInfo);
        }
    }

    /**
     * Returns an optional Result, where return value is:
     * For flat setup: Return Optional.of(disallowed) if wanted state is set on some node, else Optional.empty
     * For hierarchical setup: No wanted state for other nodes, return Optional.empty
     * Wanted state for nodes/groups are not UP:
     *   if less than maxNumberOfGroupsAllowedToBeDown: return Optional.of(allowed)
     *   else: if node is in group with nodes already down: return Optional.of(allowed), else Optional.of(disallowed)
     */
    private Optional<Result> otherNodesHaveWantedState(StorageNodeInfo nodeInfo, String newDescription) {
        Node node = nodeInfo.getNode();
        if (groupVisiting.isHierarchical()) {
            if (maxNumberOfGroupsAllowedToBeDown <= 1) {
                // Same one-group-at-a-time logic as the legacy path.
                SettableOptional<Result> anotherNodeHasWantedState = new SettableOptional<>();
                groupVisiting.visit(group -> {
                    if (!groupContainsNode(group, node)) {
                        Result result = otherNodeInGroupHasWantedState(group);
                        if (!result.settingWantedStateIsAllowed()) {
                            anotherNodeHasWantedState.set(result);
                            return false;
                        }
                    }
                    return true;
                });
                if (anotherNodeHasWantedState.isPresent()) {
                    log.log(FINE, "anotherNodeHasWantedState: " + anotherNodeHasWantedState.get());
                    return Optional.of(anotherNodeHasWantedState.get());
                }
                if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription)) {
                    log.log(FINE, "anotherNodeInGroupAlreadyAllowed, allow");
                    return Optional.of(allowSettingOfWantedState());
                }
            } else {
                // Allow as long as fewer than maxNumberOfGroupsAllowedToBeDown groups have a
                // non-UP wanted state, or this node's own group is already among them.
                Set<Integer> groupsWithStorageNodesWantedStateNotUp = groupsWithStorageNodesWantedStateNotUp();
                String disallowMessage = "At most nodes in " + maxNumberOfGroupsAllowedToBeDown + " groups can have wanted state";
                if (groupsWithStorageNodesWantedStateNotUp.size() < maxNumberOfGroupsAllowedToBeDown)
                    return Optional.of(allowSettingOfWantedState());
                if (groupsWithStorageNodesWantedStateNotUp.size() > maxNumberOfGroupsAllowedToBeDown)
                    return Optional.of(createDisallowed(disallowMessage));
                if (aGroupContainsNode(groupsWithStorageNodesWantedStateNotUp, node))
                    return Optional.of(allowSettingOfWantedState());
                return Optional.of(createDisallowed(disallowMessage));
            }
        } else {
            var otherNodeHasWantedState = otherNodeHasWantedState(nodeInfo);
            if ( ! otherNodeHasWantedState.settingWantedStateIsAllowed())
                return Optional.of(otherNodeHasWantedState);
        }
        return Optional.empty();
    }

    /** Returns a disallow-result, if there is a node in the group with wanted state != UP. */
    private Result otherNodeInGroupHasWantedState(Group group) {
        for (var configuredNode : group.getNodes()) {
            int index = configuredNode.index();
            StorageNodeInfo storageNodeInfo = clusterInfo.getStorageNodeInfo(index);
            if (storageNodeInfo == null) continue;  // node not in cluster info; skip

            State storageNodeWantedState = storageNodeInfo.getUserWantedState().getState();
            if (storageNodeWantedState != UP) {
                return createDisallowed(
                        "At most one group can have wanted state: Other storage node " + index +
                        " in group " + group.getIndex() + " has wanted state " + storageNodeWantedState);
            }

            State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState();
            if (distributorWantedState != UP) {
                return createDisallowed(
                        "At most one group can have wanted state: Other distributor " + index +
                        " in group " + group.getIndex() + " has wanted state " + distributorWantedState);
            }
        }

        return allowSettingOfWantedState();
    }

    /** Flat-topology variant: disallows when any other node has a non-UP wanted state. */
    private Result otherNodeHasWantedState(StorageNodeInfo nodeInfo) {
        for (var configuredNode : clusterInfo.getConfiguredNodes().values()) {
            int index = configuredNode.index();
            if (index == nodeInfo.getNodeIndex()) {
                continue;  // skip the node whose transition is being evaluated
            }

            State storageNodeWantedState = clusterInfo.getStorageNodeInfo(index).getUserWantedState().getState();
            if (storageNodeWantedState != UP) {
                // NOTE(review): the message below looks garbled (unbalanced quotes around
                // `index`); it was presumably meant to interpolate the node index — verify
                // against the upstream source.
                return createDisallowed(
                        "At most one node can have a wanted state when index + " has wanted state " + storageNodeWantedState);
            }

            State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState();
            if (distributorWantedState != UP) {
                // NOTE(review): same garbled message as above — verify against upstream.
                return createDisallowed(
                        "At most one node can have a wanted state when index + " has wanted state " + distributorWantedState);
            }
        }

        return allowSettingOfWantedState();
    }

    /** True if another node in this node's own group already has MAINTENANCE wanted with the same description. */
    private boolean anotherNodeInGroupAlreadyAllowed(StorageNodeInfo nodeInfo, String newDescription) {
        MutableBoolean alreadyAllowed = new MutableBoolean(false);
        groupVisiting.visit(group -> {
            if (!groupContainsNode(group, nodeInfo.getNode())) {
                return true;
            }
            alreadyAllowed.set(anotherNodeInGroupAlreadyAllowed(group, nodeInfo.getNode(), newDescription));
            return false;  // the node's group was found; stop visiting
        });
        return alreadyAllowed.get();
    }

    private boolean anotherNodeInGroupAlreadyAllowed(Group group, Node node, String newDescription) {
        return group.getNodes().stream()
                .filter(configuredNode -> configuredNode.index() != node.getIndex())
                .map(configuredNode -> clusterInfo.getStorageNodeInfo(configuredNode.index()))
                .filter(Objects::nonNull)
                .map(NodeInfo::getUserWantedState)
                .anyMatch(userWantedState -> userWantedState.getState() == State.MAINTENANCE
                        && Objects.equals(userWantedState.getDescription(), newDescription));
    }

    private static boolean groupContainsNode(Group group, Node node) {
        for (ConfiguredNode configuredNode : group.getNodes()) {
            if (configuredNode.index() == node.getIndex()) {
                return true;
            }
        }
        return false;
    }

    /** True if any of the groups with the given indexes contains the node. */
    private boolean aGroupContainsNode(Collection<Integer> groupIndexes, Node node) {
        for (Group group : getGroupsWithIndexes(groupIndexes)) {
            if (groupContainsNode(group, node))
                return true;
        }
        return false;
    }

    private List<Group> getGroupsWithIndexes(Collection<Integer> groupIndexes) {
        return clusterInfo.getStorageNodeInfos().stream()
                .map(NodeInfo::getGroup)
                .filter(group -> groupIndexes.contains(group.getIndex()))
                .collect(Collectors.toList());
    }

    /** Requires every storage node and distributor to both want to be and actually be UP (or RETIRED). */
    private Result checkAllNodesAreUp(ClusterState clusterState) {
        for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) {
            State wantedState = storageNodeInfo.getUserWantedState().getState();
            if (wantedState != UP && wantedState != RETIRED) {
                return createDisallowed("Another storage node wants state " +
                        wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
            }

            State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
            if (state != UP && state != RETIRED) {
                return createDisallowed("Another storage node has state " +
                        state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
            }
        }

        for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) {
            State wantedState = distributorNodeInfo.getUserWantedState().getState();
            if (wantedState != UP && wantedState != RETIRED) {
                return createDisallowed("Another distributor wants state " +
                        wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex());
            }

            State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
            if (state != UP && state != RETIRED) {
                return createDisallowed("Another distributor has state " +
                        state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex());
            }
        }

        return allowSettingOfWantedState();
    }

    /** Checks this distributor's view of the given storage node's minimum bucket replication. */
    private Result checkStorageNodesForDistributor(DistributorNodeInfo distributorNodeInfo,
                                                   List<StorageNode> storageNodes, Node node) {
        for (StorageNode storageNode : storageNodes) {
            if (storageNode.getIndex() == node.getIndex()) {
                Integer minReplication = storageNode.getMinCurrentReplicationFactorOrNull();
                // Why test on != null? Missing min-replication is OK (happens when not all buckets
                // have been checked within the statistics period).
                if (minReplication != null && minReplication < requiredRedundancy) {
                    return createDisallowed("Distributor " + distributorNodeInfo.getNodeIndex()
                            + " says storage node " + node.getIndex()
                            + " has buckets with redundancy as low as "
                            + storageNode.getMinCurrentReplicationFactorOrNull()
                            + ", but we require at least " + requiredRedundancy);
                } else {
                    return allowSettingOfWantedState();
                }
            }
        }

        return allowSettingOfWantedState();
    }

    /**
     * We want to check with the distributors to verify that it is safe to take down the storage node.
     * @param node the node to be checked
     * @param clusterStateVersion the cluster state we expect distributors to have
     */
    private Result checkDistributors(Node node, int clusterStateVersion) {
        if (clusterInfo.getDistributorNodeInfos().isEmpty()) {
            return createDisallowed("Not aware of any distributors, probably not safe to upgrade?");
        }
        for (DistributorNodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) {
            Integer distributorClusterStateVersion = distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull();
            if (distributorClusterStateVersion == null) {
                return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex()
                        + " has not reported any cluster state version yet.");
            } else if (distributorClusterStateVersion != clusterStateVersion) {
                return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex()
                        + " does not report same version ("
                        + distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull()
                        + ") as fleetcontroller (" + clusterStateVersion + ")");
            }

            List<StorageNode> storageNodes = distributorNodeInfo.getHostInfo().getDistributor().getStorageNodes();
            Result storageNodesResult = checkStorageNodesForDistributor(distributorNodeInfo, storageNodes, node);
            if (!storageNodesResult.settingWantedStateIsAllowed()) {
                return storageNodesResult;
            }
        }

        return allowSettingOfWantedState();
    }

    /** Indexes of leaf groups that contain at least one storage node whose wanted state is not UP. */
    private Set<Integer> groupsWithStorageNodesWantedStateNotUp() {
        return clusterInfo.getStorageNodeInfos().stream()
                .filter(sni -> !UP.equals(sni.getWantedState().getState()))
                .map(NodeInfo::getGroup)
                .filter(Objects::nonNull)
                .filter(Group::isLeafGroup)
                .map(Group::getIndex)
                .collect(Collectors.toSet());
    }
}
class NodeStateChangeChecker { private static final Logger log = Logger.getLogger(NodeStateChangeChecker.class.getName()); private static final String BUCKETS_METRIC_NAME = "vds.datastored.bucket_space.buckets_total"; private static final Map<String, String> BUCKETS_METRIC_DIMENSIONS = Map.of("bucketSpace", "default"); private final int requiredRedundancy; private final HierarchicalGroupVisiting groupVisiting; private final ClusterInfo clusterInfo; private final boolean inMoratorium; private final int maxNumberOfGroupsAllowedToBeDown; public static class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } } public Result evaluateTransition(Node node, ClusterState clusterState, SetUnitStateRequest.Condition condition, NodeState oldWantedState, NodeState newWantedState) { if (condition == FORCE) { return allowSettingOfWantedState(); } if (inMoratorium) { return createDisallowed("Master cluster controller is bootstrapping and in moratorium"); } if (condition != SAFE) { return createDisallowed("Condition not implemented: " + condition.name()); } if (node.getType() != STORAGE) { return 
createDisallowed("Safe-set of node state is only supported for storage nodes! " + "Requested node type: " + node.getType().toString()); } StorageNodeInfo nodeInfo = clusterInfo.getStorageNodeInfo(node.getIndex()); if (nodeInfo == null) { return createDisallowed("Unknown node " + node); } if (newWantedState.getState().equals(oldWantedState.getState()) && Objects.equals(newWantedState.getDescription(), oldWantedState.getDescription())) { return createAlreadySet(); } return switch (newWantedState.getState()) { case UP -> canSetStateUp(nodeInfo, oldWantedState); case MAINTENANCE -> canSetStateMaintenanceTemporarily(nodeInfo, clusterState, newWantedState.getDescription()); case DOWN -> canSetStateDownPermanently(nodeInfo, clusterState, newWantedState.getDescription()); default -> createDisallowed("Destination node state unsupported in safe mode: " + newWantedState); }; } private Result canSetStateDownPermanently(NodeInfo nodeInfo, ClusterState clusterState, String newDescription) { NodeState oldWantedState = nodeInfo.getUserWantedState(); if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) { return createDisallowed("A conflicting wanted state is already set: " + oldWantedState.getState() + ": " + oldWantedState.getDescription()); } State reportedState = nodeInfo.getReportedState().getState(); if (reportedState != UP) { return createDisallowed("Reported state (" + reportedState + ") is not UP, so no bucket data is available"); } State currentState = clusterState.getNodeState(nodeInfo.getNode()).getState(); if (currentState != RETIRED) { return createDisallowed("Only retired nodes are allowed to be set to DOWN in safe mode - is " + currentState); } HostInfo hostInfo = nodeInfo.getHostInfo(); Integer hostInfoNodeVersion = hostInfo.getClusterStateVersionOrNull(); int clusterControllerVersion = clusterState.getVersion(); if (hostInfoNodeVersion == null || hostInfoNodeVersion != clusterControllerVersion) { return 
createDisallowed("Cluster controller at version " + clusterControllerVersion + " got info for storage node " + nodeInfo.getNodeIndex() + " at a different version " + hostInfoNodeVersion); } Optional<Metrics.Value> bucketsMetric; bucketsMetric = hostInfo.getMetrics().getValueAt(BUCKETS_METRIC_NAME, BUCKETS_METRIC_DIMENSIONS); if (bucketsMetric.isEmpty() || bucketsMetric.get().getLast() == null) { return createDisallowed("Missing last value of the " + BUCKETS_METRIC_NAME + " metric for storage node " + nodeInfo.getNodeIndex()); } long lastBuckets = bucketsMetric.get().getLast(); if (lastBuckets > 0) { return createDisallowed("The storage node manages " + lastBuckets + " buckets"); } return allowSettingOfWantedState(); } private Result canSetStateUp(NodeInfo nodeInfo, NodeState oldWantedState) { if (oldWantedState.getState() == UP) { return createAlreadySet(); } if (nodeInfo.getReportedState().getState() != UP) { return createDisallowed("Refuse to set wanted state to UP, " + "since the reported state is not UP (" + nodeInfo.getReportedState().getState() + ")"); } return allowSettingOfWantedState(); } private Result canSetStateMaintenanceTemporarily(StorageNodeInfo nodeInfo, ClusterState clusterState, String newDescription) { NodeState oldWantedState = nodeInfo.getUserWantedState(); if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) { return createDisallowed("A conflicting wanted state is already set: " + oldWantedState.getState() + ": " + oldWantedState.getDescription()); } if (maxNumberOfGroupsAllowedToBeDown == -1) { var otherGroupCheck = anotherNodeInAnotherGroupHasWantedState(nodeInfo); if (!otherGroupCheck.settingWantedStateIsAllowed()) { return otherGroupCheck; } if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription)) { return allowSettingOfWantedState(); } } else { var result = otherNodesHaveWantedState(nodeInfo, newDescription, clusterState); if (result.isPresent()) return result.get(); } if 
(clusterState.getNodeState(nodeInfo.getNode()).getState() == DOWN) { log.log(FINE, "node is DOWN, allow"); return allowSettingOfWantedState(); } Result allNodesAreUpCheck = checkAllNodesAreUp(clusterState); if (!allNodesAreUpCheck.settingWantedStateIsAllowed()) { log.log(FINE, "allNodesAreUpCheck: " + allNodesAreUpCheck); return allNodesAreUpCheck; } Result checkDistributorsResult = checkDistributors(nodeInfo.getNode(), clusterState.getVersion()); if (!checkDistributorsResult.settingWantedStateIsAllowed()) { log.log(FINE, "checkDistributors: "+ checkDistributorsResult); return checkDistributorsResult; } return allowSettingOfWantedState(); } /** * Returns a disallow-result if there is another node (in another group, if hierarchical) * that has a wanted state != UP. We disallow more than 1 suspended node/group at a time. */ private Result anotherNodeInAnotherGroupHasWantedState(StorageNodeInfo nodeInfo) { if (groupVisiting.isHierarchical()) { SettableOptional<Result> anotherNodeHasWantedState = new SettableOptional<>(); groupVisiting.visit(group -> { if (!groupContainsNode(group, nodeInfo.getNode())) { Result result = otherNodeInGroupHasWantedState(group); if (!result.settingWantedStateIsAllowed()) { anotherNodeHasWantedState.set(result); return false; } } return true; }); return anotherNodeHasWantedState.asOptional().orElseGet(Result::allowSettingOfWantedState); } else { return otherNodeHasWantedState(nodeInfo); } } /** * Returns an optional Result, where return value is: * For flat setup: Return Optional.of(disallowed) if wanted state is set on some node, else Optional.empty * For hierarchical setup: No wanted state for other nodes, return Optional.empty * Wanted state for nodes/groups are not UP: * if less than maxNumberOfGroupsAllowedToBeDown: return Optional.of(allowed) * else: if node is in group with nodes already down: return Optional.of(allowed), else Optional.of(disallowed) */ private Optional<Result> otherNodesHaveWantedState(StorageNodeInfo nodeInfo, 
String newDescription, ClusterState clusterState) { Node node = nodeInfo.getNode(); if (groupVisiting.isHierarchical()) { Set<Integer> groupsWithNodesWantedStateNotUp = groupsWithUserWantedStateNotUp(); if (groupsWithNodesWantedStateNotUp.size() == 0) { log.log(FINE, "groupsWithNodesWantedStateNotUp=0"); return Optional.empty(); } Set<Integer> groupsWithSameStateAndDescription = groupsWithSameStateAndDescription(MAINTENANCE, newDescription); if (aGroupContainsNode(groupsWithSameStateAndDescription, node)) { log.log(FINE, "Node is in group with same state and description, allow"); return Optional.of(allowSettingOfWantedState()); } if (groupsWithSameStateAndDescription.size() == 0) { return Optional.of(createDisallowed("Wanted state already set for another node in groups: " + sortSetIntoList(groupsWithNodesWantedStateNotUp))); } Set<Integer> retiredAndNotUpGroups = groupsWithNotRetiredAndNotUp(clusterState); int numberOfGroupsToConsider = retiredAndNotUpGroups.size(); if (aGroupContainsNode(retiredAndNotUpGroups, node)) { numberOfGroupsToConsider = retiredAndNotUpGroups.size() - 1; } if (numberOfGroupsToConsider < maxNumberOfGroupsAllowedToBeDown) { log.log(FINE, "Allow, retiredAndNotUpGroups=" + retiredAndNotUpGroups); return Optional.of(allowSettingOfWantedState()); } return Optional.of(createDisallowed(String.format("At most %d groups can have wanted state: %s", maxNumberOfGroupsAllowedToBeDown, sortSetIntoList(retiredAndNotUpGroups)))); } else { var otherNodeHasWantedState = otherNodeHasWantedState(nodeInfo); if ( ! otherNodeHasWantedState.settingWantedStateIsAllowed()) return Optional.of(otherNodeHasWantedState); } return Optional.empty(); } private ArrayList<Integer> sortSetIntoList(Set<Integer> set) { var sortedList = new ArrayList<>(set); Collections.sort(sortedList); return sortedList; } /** Returns a disallow-result, if there is a node in the group with wanted state != UP. 
*/ private Result otherNodeInGroupHasWantedState(Group group) { for (var configuredNode : group.getNodes()) { int index = configuredNode.index(); StorageNodeInfo storageNodeInfo = clusterInfo.getStorageNodeInfo(index); if (storageNodeInfo == null) continue; State storageNodeWantedState = storageNodeInfo.getUserWantedState().getState(); if (storageNodeWantedState != UP) { return createDisallowed( "At most one group can have wanted state: Other storage node " + index + " in group " + group.getIndex() + " has wanted state " + storageNodeWantedState); } State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState(); if (distributorWantedState != UP) { return createDisallowed( "At most one group can have wanted state: Other distributor " + index + " in group " + group.getIndex() + " has wanted state " + distributorWantedState); } } return allowSettingOfWantedState(); } private Result otherNodeHasWantedState(StorageNodeInfo nodeInfo) { for (var configuredNode : clusterInfo.getConfiguredNodes().values()) { int index = configuredNode.index(); if (index == nodeInfo.getNodeIndex()) { continue; } State storageNodeWantedState = clusterInfo.getStorageNodeInfo(index).getUserWantedState().getState(); if (storageNodeWantedState != UP) { return createDisallowed( "At most one node can have a wanted state when index + " has wanted state " + storageNodeWantedState); } State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState(); if (distributorWantedState != UP) { return createDisallowed( "At most one node can have a wanted state when index + " has wanted state " + distributorWantedState); } } return allowSettingOfWantedState(); } private boolean anotherNodeInGroupAlreadyAllowed(StorageNodeInfo nodeInfo, String newDescription) { MutableBoolean alreadyAllowed = new MutableBoolean(false); groupVisiting.visit(group -> { if (!groupContainsNode(group, nodeInfo.getNode())) { return true; } 
alreadyAllowed.set(anotherNodeInGroupAlreadyAllowed(group, nodeInfo.getNode(), newDescription)); return false; }); return alreadyAllowed.get(); } private boolean anotherNodeInGroupAlreadyAllowed(Group group, Node node, String newDescription) { return group.getNodes().stream() .filter(configuredNode -> configuredNode.index() != node.getIndex()) .map(configuredNode -> clusterInfo.getStorageNodeInfo(configuredNode.index())) .filter(Objects::nonNull) .map(NodeInfo::getUserWantedState) .anyMatch(userWantedState -> userWantedState.getState() == State.MAINTENANCE && Objects.equals(userWantedState.getDescription(), newDescription)); } private static boolean groupContainsNode(Group group, Node node) { for (ConfiguredNode configuredNode : group.getNodes()) { if (configuredNode.index() == node.getIndex()) { return true; } } return false; } private boolean aGroupContainsNode(Collection<Integer> groupIndexes, Node node) { for (Group group : getGroupsWithIndexes(groupIndexes)) { if (groupContainsNode(group, node)) return true; } return false; } private List<Group> getGroupsWithIndexes(Collection<Integer> groupIndexes) { return clusterInfo.getStorageNodeInfos().stream() .map(NodeInfo::getGroup) .filter(group -> groupIndexes.contains(group.getIndex())) .collect(Collectors.toList()); } private Result checkAllNodesAreUp(ClusterState clusterState) { for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) { State wantedState = storageNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another storage node wants state " + wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another storage node has state " + state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex()); } } for (NodeInfo distributorNodeInfo : 
clusterInfo.getDistributorNodeInfos()) { State wantedState = distributorNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState(); if (state != UP && state != RETIRED) { return createDisallowed("Another distributor has state " + state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex()); } } return allowSettingOfWantedState(); } private Result checkStorageNodesForDistributor(DistributorNodeInfo distributorNodeInfo, List<StorageNode> storageNodes, Node node) { for (StorageNode storageNode : storageNodes) { if (storageNode.getIndex() == node.getIndex()) { Integer minReplication = storageNode.getMinCurrentReplicationFactorOrNull(); if (minReplication != null && minReplication < requiredRedundancy) { return createDisallowed("Distributor " + distributorNodeInfo.getNodeIndex() + " says storage node " + node.getIndex() + " has buckets with redundancy as low as " + storageNode.getMinCurrentReplicationFactorOrNull() + ", but we require at least " + requiredRedundancy); } else { return allowSettingOfWantedState(); } } } return allowSettingOfWantedState(); } /** * We want to check with the distributors to verify that it is safe to take down the storage node. 
* @param node the node to be checked * @param clusterStateVersion the cluster state we expect distributors to have */ private Result checkDistributors(Node node, int clusterStateVersion) { if (clusterInfo.getDistributorNodeInfos().isEmpty()) { return createDisallowed("Not aware of any distributors, probably not safe to upgrade?"); } for (DistributorNodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) { Integer distributorClusterStateVersion = distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull(); if (distributorClusterStateVersion == null) { return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex() + " has not reported any cluster state version yet."); } else if (distributorClusterStateVersion != clusterStateVersion) { return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex() + " does not report same version (" + distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull() + ") as fleetcontroller (" + clusterStateVersion + ")"); } List<StorageNode> storageNodes = distributorNodeInfo.getHostInfo().getDistributor().getStorageNodes(); Result storageNodesResult = checkStorageNodesForDistributor(distributorNodeInfo, storageNodes, node); if (!storageNodesResult.settingWantedStateIsAllowed()) { return storageNodesResult; } } return allowSettingOfWantedState(); } private Set<Integer> groupsWithUserWantedStateNotUp() { return clusterInfo.getAllNodeInfos().stream() .filter(sni -> !UP.equals(sni.getUserWantedState().getState())) .map(NodeInfo::getGroup) .filter(Objects::nonNull) .filter(Group::isLeafGroup) .map(Group::getIndex) .collect(Collectors.toSet()); } private Set<Integer> groupsWithSameStateAndDescription(State state, String newDescription) { return clusterInfo.getAllNodeInfos().stream() .filter(nodeInfo -> { var userWantedState = nodeInfo.getUserWantedState(); return userWantedState.getState() == state && Objects.equals(userWantedState.getDescription(), newDescription); }) 
.map(NodeInfo::getGroup) .filter(Objects::nonNull) .filter(Group::isLeafGroup) .map(Group::getIndex) .collect(Collectors.toSet()); } private Set<Integer> groupsWithNotRetiredAndNotUp(ClusterState clusterState) { return clusterInfo.getAllNodeInfos().stream() .filter(nodeInfo -> (nodeInfo.getUserWantedState().getState() != RETIRED && nodeInfo.getUserWantedState().getState() != UP) || (clusterState.getNodeState(nodeInfo.getNode()).getState() != RETIRED && clusterState.getNodeState(nodeInfo.getNode()).getState() != UP)) .map(NodeInfo::getGroup) .filter(Objects::nonNull) .filter(Group::isLeafGroup) .map(Group::getIndex) .collect(Collectors.toSet()); } }
Why not make `allowMoreThanOneContentGroupDown` a `Predicate<ClusterSpec.Id>` and invoke `flagValue(...)` directly here, instead of going via `int` and `flagValueAsInt`?
/**
 * Resolves every model feature flag once, at construction time, for the given application
 * and Vespa version. Each assignment binds one field to its flag via {@code flagValue(...)};
 * keep the field/flag pairings exact when editing.
 *
 * @param source  the flag source to resolve values from
 * @param appId   application the flags are resolved for
 * @param version Vespa version the flags are resolved for
 */
public FeatureFlags(FlagSource source, ApplicationId appId, Version version) {
    this.defaultTermwiseLimit = flagValue(source, appId, version, Flags.DEFAULT_TERM_WISE_LIMIT);
    this.feedSequencer = flagValue(source, appId, version, Flags.FEED_SEQUENCER_TYPE);
    this.responseSequencer = flagValue(source, appId, version, Flags.RESPONSE_SEQUENCER_TYPE);
    this.numResponseThreads = flagValue(source, appId, version, Flags.RESPONSE_NUM_THREADS);
    this.skipCommunicationManagerThread = flagValue(source, appId, version, Flags.SKIP_COMMUNICATIONMANAGER_THREAD);
    this.skipMbusRequestThread = flagValue(source, appId, version, Flags.SKIP_MBUS_REQUEST_THREAD);
    this.skipMbusReplyThread = flagValue(source, appId, version, Flags.SKIP_MBUS_REPLY_THREAD);
    this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, version, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE);
    this.feedConcurrency = flagValue(source, appId, version, Flags.FEED_CONCURRENCY);
    this.feedNiceness = flagValue(source, appId, version, Flags.FEED_NICENESS);
    this.mbus_network_threads = flagValue(source, appId, version, Flags.MBUS_NUM_NETWORK_THREADS);
    this.allowedAthenzProxyIdentities = flagValue(source, appId, version, Flags.ALLOWED_ATHENZ_PROXY_IDENTITIES);
    this.maxActivationInhibitedOutOfSyncGroups = flagValue(source, appId, version, Flags.MAX_ACTIVATION_INHIBITED_OUT_OF_SYNC_GROUPS);
    // Deferred, per-cluster-type lookup; flagValueAsInt maps the boolean flag to 0/1 to fit the
    // ToIntFunction<ClusterSpec.Type> field type.
    this.jvmOmitStackTraceInFastThrow = type -> flagValueAsInt(source, appId, version, type, PermanentFlags.JVM_OMIT_STACK_TRACE_IN_FAST_THROW);
    this.resourceLimitDisk = flagValue(source, appId, version, PermanentFlags.RESOURCE_LIMIT_DISK);
    this.resourceLimitMemory = flagValue(source, appId, version, PermanentFlags.RESOURCE_LIMIT_MEMORY);
    this.minNodeRatioPerGroup = flagValue(source, appId, version, Flags.MIN_NODE_RATIO_PER_GROUP);
    this.containerDumpHeapOnShutdownTimeout = flagValue(source, appId, version, Flags.CONTAINER_DUMP_HEAP_ON_SHUTDOWN_TIMEOUT);
    this.loadCodeAsHugePages = flagValue(source, appId, version, Flags.LOAD_CODE_AS_HUGEPAGES);
    this.containerShutdownTimeout = flagValue(source, appId, version, Flags.CONTAINER_SHUTDOWN_TIMEOUT);
    this.maxUnCommittedMemory = flagValue(source, appId, version, Flags.MAX_UNCOMMITTED_MEMORY);
    this.forwardIssuesAsErrors = flagValue(source, appId, version, PermanentFlags.FORWARD_ISSUES_AS_ERRORS);
    this.ignoreThreadStackSizes = flagValue(source, appId, version, Flags.IGNORE_THREAD_STACK_SIZES);
    this.useV8GeoPositions = flagValue(source, appId, version, Flags.USE_V8_GEO_POSITIONS);
    this.maxCompactBuffers = flagValue(source, appId, version, Flags.MAX_COMPACT_BUFFERS);
    this.ignoredHttpUserAgents = flagValue(source, appId, version, PermanentFlags.IGNORED_HTTP_USER_AGENTS);
    this.useQrserverServiceName = flagValue(source, appId, version, Flags.USE_QRSERVER_SERVICE_NAME);
    this.avoidRenamingSummaryFeatures = flagValue(source, appId, version, Flags.AVOID_RENAMING_SUMMARY_FEATURES);
    // Flag value is a string naming the architecture; converted to the enum here.
    this.adminClusterArchitecture = Architecture.valueOf(flagValue(source, appId, version, PermanentFlags.ADMIN_CLUSTER_NODE_ARCHITECTURE));
    this.enableProxyProtocolMixedMode = flagValue(source, appId, version, Flags.ENABLE_PROXY_PROTOCOL_MIXED_MODE);
    this.sharedStringRepoNoReclaim = flagValue(source, appId, version, Flags.SHARED_STRING_REPO_NO_RECLAIM);
    this.logFileCompressionAlgorithm = flagValue(source, appId, version, Flags.LOG_FILE_COMPRESSION_ALGORITHM);
    this.mbus_java_num_targets = flagValue(source, appId, version, Flags.MBUS_JAVA_NUM_TARGETS);
    this.mbus_java_events_before_wakeup = flagValue(source, appId, version, Flags.MBUS_JAVA_EVENTS_BEFORE_WAKEUP);
    this.mbus_cpp_num_targets = flagValue(source, appId, version, Flags.MBUS_CPP_NUM_TARGETS);
    this.mbus_cpp_events_before_wakeup = flagValue(source, appId, version, Flags.MBUS_CPP_EVENTS_BEFORE_WAKEUP);
    this.rpc_num_targets = flagValue(source, appId, version, Flags.RPC_NUM_TARGETS);
    this.rpc_events_before_wakeup = flagValue(source, appId, version, Flags.RPC_EVENTS_BEFORE_WAKEUP);
    this.queryDispatchPolicy = flagValue(source, appId, version, Flags.QUERY_DISPATCH_POLICY);
    this.queryDispatchWarmup = flagValue(source, appId, version, PermanentFlags.QUERY_DISPATCH_WARMUP);
    this.useRestrictedDataPlaneBindings = flagValue(source, appId, version, Flags.RESTRICT_DATA_PLANE_BINDINGS);
    this.heapPercentage = flagValue(source, appId, version, PermanentFlags.HEAP_SIZE_PERCENTAGE);
    this.enableGlobalPhase = flagValue(source, appId, version, Flags.ENABLE_GLOBAL_PHASE);
    this.summaryDecodePolicy = flagValue(source, appId, version, Flags.SUMMARY_DECODE_POLICY);
    // Deferred, per-cluster-id lookup; flagValueAsInt encodes the boolean flag as 0/1 to fit the
    // ToIntFunction<ClusterSpec.Id> field type.
    this.allowMoreThanOneContentGroupDown = clusterId -> flagValueAsInt(source, appId, version, clusterId, Flags.ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN);
}
// Deferred, per-cluster-id flag lookup; flagValueAsInt encodes the boolean flag value as 0/1
// because the field is declared as a ToIntFunction<ClusterSpec.Id>.
this.allowMoreThanOneContentGroupDown = clusterId -> flagValueAsInt(source, appId, version, clusterId, Flags.ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN);
/**
 * Resolves every model feature flag once, at construction time, for the given application
 * and Vespa version. Each assignment binds one field to its flag via {@code flagValue(...)};
 * keep the field/flag pairings exact when editing. In this version the two deferred boolean
 * lookups bind {@code flagValue(...)} directly (no int encoding via {@code flagValueAsInt}).
 *
 * @param source  the flag source to resolve values from
 * @param appId   application the flags are resolved for
 * @param version Vespa version the flags are resolved for
 */
public FeatureFlags(FlagSource source, ApplicationId appId, Version version) {
    this.defaultTermwiseLimit = flagValue(source, appId, version, Flags.DEFAULT_TERM_WISE_LIMIT);
    this.feedSequencer = flagValue(source, appId, version, Flags.FEED_SEQUENCER_TYPE);
    this.responseSequencer = flagValue(source, appId, version, Flags.RESPONSE_SEQUENCER_TYPE);
    this.numResponseThreads = flagValue(source, appId, version, Flags.RESPONSE_NUM_THREADS);
    this.skipCommunicationManagerThread = flagValue(source, appId, version, Flags.SKIP_COMMUNICATIONMANAGER_THREAD);
    this.skipMbusRequestThread = flagValue(source, appId, version, Flags.SKIP_MBUS_REQUEST_THREAD);
    this.skipMbusReplyThread = flagValue(source, appId, version, Flags.SKIP_MBUS_REPLY_THREAD);
    this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, version, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE);
    this.feedConcurrency = flagValue(source, appId, version, Flags.FEED_CONCURRENCY);
    this.feedNiceness = flagValue(source, appId, version, Flags.FEED_NICENESS);
    this.mbus_network_threads = flagValue(source, appId, version, Flags.MBUS_NUM_NETWORK_THREADS);
    this.allowedAthenzProxyIdentities = flagValue(source, appId, version, Flags.ALLOWED_ATHENZ_PROXY_IDENTITIES);
    this.maxActivationInhibitedOutOfSyncGroups = flagValue(source, appId, version, Flags.MAX_ACTIVATION_INHIBITED_OUT_OF_SYNC_GROUPS);
    // Deferred, per-cluster-type lookup of the boolean flag value.
    this.jvmOmitStackTraceInFastThrow = type -> flagValue(source, appId, version, type, PermanentFlags.JVM_OMIT_STACK_TRACE_IN_FAST_THROW);
    this.resourceLimitDisk = flagValue(source, appId, version, PermanentFlags.RESOURCE_LIMIT_DISK);
    this.resourceLimitMemory = flagValue(source, appId, version, PermanentFlags.RESOURCE_LIMIT_MEMORY);
    this.minNodeRatioPerGroup = flagValue(source, appId, version, Flags.MIN_NODE_RATIO_PER_GROUP);
    this.containerDumpHeapOnShutdownTimeout = flagValue(source, appId, version, Flags.CONTAINER_DUMP_HEAP_ON_SHUTDOWN_TIMEOUT);
    this.loadCodeAsHugePages = flagValue(source, appId, version, Flags.LOAD_CODE_AS_HUGEPAGES);
    this.containerShutdownTimeout = flagValue(source, appId, version, Flags.CONTAINER_SHUTDOWN_TIMEOUT);
    this.maxUnCommittedMemory = flagValue(source, appId, version, Flags.MAX_UNCOMMITTED_MEMORY);
    this.forwardIssuesAsErrors = flagValue(source, appId, version, PermanentFlags.FORWARD_ISSUES_AS_ERRORS);
    this.ignoreThreadStackSizes = flagValue(source, appId, version, Flags.IGNORE_THREAD_STACK_SIZES);
    this.useV8GeoPositions = flagValue(source, appId, version, Flags.USE_V8_GEO_POSITIONS);
    this.maxCompactBuffers = flagValue(source, appId, version, Flags.MAX_COMPACT_BUFFERS);
    this.ignoredHttpUserAgents = flagValue(source, appId, version, PermanentFlags.IGNORED_HTTP_USER_AGENTS);
    this.useQrserverServiceName = flagValue(source, appId, version, Flags.USE_QRSERVER_SERVICE_NAME);
    this.avoidRenamingSummaryFeatures = flagValue(source, appId, version, Flags.AVOID_RENAMING_SUMMARY_FEATURES);
    // Flag value is a string naming the architecture; converted to the enum here.
    this.adminClusterArchitecture = Architecture.valueOf(flagValue(source, appId, version, PermanentFlags.ADMIN_CLUSTER_NODE_ARCHITECTURE));
    this.enableProxyProtocolMixedMode = flagValue(source, appId, version, Flags.ENABLE_PROXY_PROTOCOL_MIXED_MODE);
    this.sharedStringRepoNoReclaim = flagValue(source, appId, version, Flags.SHARED_STRING_REPO_NO_RECLAIM);
    this.logFileCompressionAlgorithm = flagValue(source, appId, version, Flags.LOG_FILE_COMPRESSION_ALGORITHM);
    this.mbus_java_num_targets = flagValue(source, appId, version, Flags.MBUS_JAVA_NUM_TARGETS);
    this.mbus_java_events_before_wakeup = flagValue(source, appId, version, Flags.MBUS_JAVA_EVENTS_BEFORE_WAKEUP);
    this.mbus_cpp_num_targets = flagValue(source, appId, version, Flags.MBUS_CPP_NUM_TARGETS);
    this.mbus_cpp_events_before_wakeup = flagValue(source, appId, version, Flags.MBUS_CPP_EVENTS_BEFORE_WAKEUP);
    this.rpc_num_targets = flagValue(source, appId, version, Flags.RPC_NUM_TARGETS);
    this.rpc_events_before_wakeup = flagValue(source, appId, version, Flags.RPC_EVENTS_BEFORE_WAKEUP);
    this.queryDispatchPolicy = flagValue(source, appId, version, Flags.QUERY_DISPATCH_POLICY);
    this.queryDispatchWarmup = flagValue(source, appId, version, PermanentFlags.QUERY_DISPATCH_WARMUP);
    this.useRestrictedDataPlaneBindings = flagValue(source, appId, version, Flags.RESTRICT_DATA_PLANE_BINDINGS);
    this.heapPercentage = flagValue(source, appId, version, PermanentFlags.HEAP_SIZE_PERCENTAGE);
    this.enableGlobalPhase = flagValue(source, appId, version, Flags.ENABLE_GLOBAL_PHASE);
    this.summaryDecodePolicy = flagValue(source, appId, version, Flags.SUMMARY_DECODE_POLICY);
    // Deferred, per-cluster-id lookup of the boolean flag value.
    this.allowMoreThanOneContentGroupDown = clusterId -> flagValue(source, appId, version, clusterId, Flags.ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN);
}
/**
 * Feature-flag backed implementation of {@code ModelContext.FeatureFlags}: every flag is
 * resolved eagerly into a final field (constructor not part of this chunk) and exposed
 * through the interface accessors below.
 *
 * <p>The two {@code ToIntFunction} fields defer per-cluster flag lookup; their boolean flag
 * values are encoded as 0/1 by {@code flagValueAsInt} and decoded at the call sites.
 */
class FeatureFlags implements ModelContext.FeatureFlags {
    private final String queryDispatchPolicy;
    private final double queryDispatchWarmup;
    private final double defaultTermwiseLimit;
    private final String feedSequencer;
    private final String responseSequencer;
    private final int numResponseThreads;
    private final boolean skipCommunicationManagerThread;
    private final boolean skipMbusRequestThread;
    private final boolean skipMbusReplyThread;
    private final boolean useAsyncMessageHandlingOnSchedule;
    private final double feedConcurrency;
    private final double feedNiceness;
    private final List<String> allowedAthenzProxyIdentities;
    private final int maxActivationInhibitedOutOfSyncGroups;
    // Boolean flag per cluster type, encoded as 0/1 (see jvmOmitStackTraceInFastThrowOption).
    private final ToIntFunction<ClusterSpec.Type> jvmOmitStackTraceInFastThrow;
    private final double resourceLimitDisk;
    private final double resourceLimitMemory;
    private final double minNodeRatioPerGroup;
    private final boolean containerDumpHeapOnShutdownTimeout;
    private final boolean loadCodeAsHugePages;
    private final double containerShutdownTimeout;
    private final int maxUnCommittedMemory;
    private final boolean forwardIssuesAsErrors;
    private final boolean ignoreThreadStackSizes;
    private final boolean useV8GeoPositions;
    private final int maxCompactBuffers;
    private final List<String> ignoredHttpUserAgents;
    private final boolean useQrserverServiceName;
    private final boolean avoidRenamingSummaryFeatures;
    private final Architecture adminClusterArchitecture;
    private final boolean enableProxyProtocolMixedMode;
    private final boolean sharedStringRepoNoReclaim;
    private final String logFileCompressionAlgorithm;
    private final int mbus_network_threads;
    private final int mbus_java_num_targets;
    private final int mbus_java_events_before_wakeup;
    private final int mbus_cpp_num_targets;
    private final int mbus_cpp_events_before_wakeup;
    private final int rpc_num_targets;
    private final int rpc_events_before_wakeup;
    private final boolean useRestrictedDataPlaneBindings;
    private final int heapPercentage;
    private final boolean enableGlobalPhase;
    private final String summaryDecodePolicy;
    // Boolean flag per content cluster id, encoded as 0/1 (see allowMoreThanOneContentGroupDown(ClusterSpec.Id)).
    private final ToIntFunction<ClusterSpec.Id> allowMoreThanOneContentGroupDown;

    // Plain accessors returning the flag values resolved at construction time.
    @Override public int heapSizePercentage() { return heapPercentage; }
    @Override public String queryDispatchPolicy() { return queryDispatchPolicy; }
    @Override public double queryDispatchWarmup() { return queryDispatchWarmup; }
    @Override public String summaryDecodePolicy() { return summaryDecodePolicy; }
    @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; }
    @Override public String feedSequencerType() { return feedSequencer; }
    @Override public String responseSequencerType() { return responseSequencer; }
    @Override public int defaultNumResponseThreads() { return numResponseThreads; }
    @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; }
    @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; }
    @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; }
    @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; }
    @Override public double feedConcurrency() { return feedConcurrency; }
    @Override public double feedNiceness() { return feedNiceness; }
    @Override public int mbusNetworkThreads() { return mbus_network_threads; }
    @Override public List<String> allowedAthenzProxyIdentities() { return allowedAthenzProxyIdentities; }
    @Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; }
    // Decodes the 0/1-encoded per-cluster-type flag into the JVM option string.
    @Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return translateJvmOmitStackTraceInFastThrowIntToString(jvmOmitStackTraceInFastThrow, type); }
    @Override public double resourceLimitDisk() { return resourceLimitDisk; }
    @Override public double resourceLimitMemory() { return resourceLimitMemory; }
    @Override public double minNodeRatioPerGroup() { return minNodeRatioPerGroup; }
    @Override public double containerShutdownTimeout() { return containerShutdownTimeout; }
    @Override public boolean containerDumpHeapOnShutdownTimeout() { return containerDumpHeapOnShutdownTimeout; }
    @Override public boolean loadCodeAsHugePages() { return loadCodeAsHugePages; }
    @Override public int maxUnCommittedMemory() { return maxUnCommittedMemory; }
    @Override public boolean forwardIssuesAsErrors() { return forwardIssuesAsErrors; }
    @Override public boolean ignoreThreadStackSizes() { return ignoreThreadStackSizes; }
    @Override public boolean useV8GeoPositions() { return useV8GeoPositions; }
    @Override public int maxCompactBuffers() { return maxCompactBuffers; }
    @Override public List<String> ignoredHttpUserAgents() { return ignoredHttpUserAgents; }
    @Override public boolean useQrserverServiceName() { return useQrserverServiceName; }
    @Override public boolean avoidRenamingSummaryFeatures() { return avoidRenamingSummaryFeatures; }
    @Override public Architecture adminClusterArchitecture() { return adminClusterArchitecture; }
    @Override public boolean enableProxyProtocolMixedMode() { return enableProxyProtocolMixedMode; }
    @Override public boolean sharedStringRepoNoReclaim() { return sharedStringRepoNoReclaim; }
    @Override public int mbusJavaRpcNumTargets() { return mbus_java_num_targets; }
    @Override public int mbusJavaEventsBeforeWakeup() { return mbus_java_events_before_wakeup; }
    @Override public int mbusCppRpcNumTargets() { return mbus_cpp_num_targets; }
    @Override public int mbusCppEventsBeforeWakeup() { return mbus_cpp_events_before_wakeup; }
    @Override public int rpcNumTargets() { return rpc_num_targets; }
    @Override public int rpcEventsBeforeWakeup() { return rpc_events_before_wakeup; }
    // Falls back to the caller-supplied default when the flag is unset (null) or empty.
    @Override public String logFileCompressionAlgorithm(String defVal) {
        var fflag = this.logFileCompressionAlgorithm;
        if (fflag != null && !fflag.equals("")) {
            return fflag;
        }
        return defVal;
    }
    @Override public boolean useRestrictedDataPlaneBindings() { return useRestrictedDataPlaneBindings; }
    @Override public boolean enableGlobalPhase() { return enableGlobalPhase; }
    // Decodes the 0/1-encoded per-cluster-id flag back into a boolean.
    @Override public boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return allowMoreThanOneContentGroupDown.applyAsInt(id) != 0; }

    // Resolves a flag for the given application and Vespa version.
    private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) {
        return flag.bindTo(source)
                .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())
                .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString())
                .boxedValue();
    }

    // Resolves a flag dimensioned on tenant instead of application.
    private static <V> V flagValue(FlagSource source, TenantName tenant, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) {
        return flag.bindTo(source)
                .with(FetchVector.Dimension.TENANT_ID, tenant.value())
                .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString())
                .boxedValue();
    }

    // Resolves a flag with an additional cluster-type dimension.
    private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion,
                                   ClusterSpec.Type clusterType, UnboundFlag<? extends V, ?, ?> flag) {
        return flag.bindTo(source)
                .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())
                .with(FetchVector.Dimension.CLUSTER_TYPE, clusterType.name())
                .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString())
                .boxedValue();
    }

    // Resolves a flag with an additional cluster-id dimension.
    private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion,
                                   ClusterSpec.Id clusterId, UnboundFlag<? extends V, ?, ?> flag) {
        return flag.bindTo(source)
                .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())
                .with(FetchVector.Dimension.CLUSTER_ID, clusterId.value())
                .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString())
                .boxedValue();
    }

    // Encodes a boolean flag (per cluster type) as 0/1, to fit a ToIntFunction field.
    static int flagValueAsInt(FlagSource source, ApplicationId appId, Version version,
                              ClusterSpec.Type clusterType, UnboundFlag<? extends Boolean, ?, ?> flag) {
        return flagValue(source, appId, version, clusterType, flag) ? 1 : 0;
    }

    // Encodes a boolean flag (per cluster id) as 0/1, to fit a ToIntFunction field.
    static int flagValueAsInt(FlagSource source, ApplicationId appId, Version version,
                              ClusterSpec.Id clusterId, UnboundFlag<? extends Boolean, ?, ?> flag) {
        return flagValue(source, appId, version, clusterId, flag) ? 1 : 0;
    }

    // 1 (flag true) means keep the JVM default (omit fast-throw stack traces), i.e. no extra option.
    private String translateJvmOmitStackTraceInFastThrowIntToString(ToIntFunction<ClusterSpec.Type> function,
                                                                    ClusterSpec.Type clusterType) {
        return function.applyAsInt(clusterType) == 1 ? "" : "-XX:-OmitStackTraceInFastThrow";
    }
}
// Immutable snapshot of all feature-flag values for one application at one Vespa version.
// Every flag is resolved once (in a constructor not shown in this fragment) and served
// from final fields, so model building sees a consistent view even if flags change mid-build.
// Boolean-valued, cluster-scoped flags are modelled as Predicate<ClusterSpec.Type/Id> and
// evaluated lazily so the cluster dimension can be supplied at query time.
class FeatureFlags implements ModelContext.FeatureFlags { private final String queryDispatchPolicy; private final double queryDispatchWarmup; private final double defaultTermwiseLimit; private final String feedSequencer; private final String responseSequencer; private final int numResponseThreads; private final boolean skipCommunicationManagerThread; private final boolean skipMbusRequestThread; private final boolean skipMbusReplyThread; private final boolean useAsyncMessageHandlingOnSchedule; private final double feedConcurrency; private final double feedNiceness; private final List<String> allowedAthenzProxyIdentities; private final int maxActivationInhibitedOutOfSyncGroups; private final Predicate<ClusterSpec.Type> jvmOmitStackTraceInFastThrow; private final double resourceLimitDisk; private final double resourceLimitMemory; private final double minNodeRatioPerGroup; private final boolean containerDumpHeapOnShutdownTimeout; private final boolean loadCodeAsHugePages; private final double containerShutdownTimeout; private final int maxUnCommittedMemory; private final boolean forwardIssuesAsErrors; private final boolean ignoreThreadStackSizes; private final boolean useV8GeoPositions; private final int maxCompactBuffers; private final List<String> ignoredHttpUserAgents; private final boolean useQrserverServiceName; private final boolean avoidRenamingSummaryFeatures; private final Architecture adminClusterArchitecture; private final boolean enableProxyProtocolMixedMode; private final boolean sharedStringRepoNoReclaim; private final String logFileCompressionAlgorithm; private final int mbus_network_threads; private final int mbus_java_num_targets; private final int mbus_java_events_before_wakeup; private final int mbus_cpp_num_targets; private final int mbus_cpp_events_before_wakeup; private final int rpc_num_targets; private final int rpc_events_before_wakeup; private final boolean useRestrictedDataPlaneBindings; private final int heapPercentage; private final boolean 
enableGlobalPhase; private final String summaryDecodePolicy; private final Predicate<ClusterSpec.Id> allowMoreThanOneContentGroupDown; @Override public int heapSizePercentage() { return heapPercentage; } @Override public String queryDispatchPolicy() { return queryDispatchPolicy; } @Override public double queryDispatchWarmup() { return queryDispatchWarmup; } @Override public String summaryDecodePolicy() { return summaryDecodePolicy; } @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @Override public String feedSequencerType() { return feedSequencer; } @Override public String responseSequencerType() { return responseSequencer; } @Override public int defaultNumResponseThreads() { return numResponseThreads; } @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; } @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; } @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; } @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; } @Override public double feedConcurrency() { return feedConcurrency; } @Override public double feedNiceness() { return feedNiceness; } @Override public int mbusNetworkThreads() { return mbus_network_threads; } @Override public List<String> allowedAthenzProxyIdentities() { return allowedAthenzProxyIdentities; } @Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; } @Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return translateJvmOmitStackTraceInFastThrowToString(jvmOmitStackTraceInFastThrow, type); } @Override public double resourceLimitDisk() { return resourceLimitDisk; } @Override public double resourceLimitMemory() { return resourceLimitMemory; } @Override public double minNodeRatioPerGroup() { return minNodeRatioPerGroup; } @Override public double containerShutdownTimeout() { 
return containerShutdownTimeout; } @Override public boolean containerDumpHeapOnShutdownTimeout() { return containerDumpHeapOnShutdownTimeout; } @Override public boolean loadCodeAsHugePages() { return loadCodeAsHugePages; } @Override public int maxUnCommittedMemory() { return maxUnCommittedMemory; } @Override public boolean forwardIssuesAsErrors() { return forwardIssuesAsErrors; } @Override public boolean ignoreThreadStackSizes() { return ignoreThreadStackSizes; } @Override public boolean useV8GeoPositions() { return useV8GeoPositions; } @Override public int maxCompactBuffers() { return maxCompactBuffers; } @Override public List<String> ignoredHttpUserAgents() { return ignoredHttpUserAgents; } @Override public boolean useQrserverServiceName() { return useQrserverServiceName; } @Override public boolean avoidRenamingSummaryFeatures() { return avoidRenamingSummaryFeatures; } @Override public Architecture adminClusterArchitecture() { return adminClusterArchitecture; } @Override public boolean enableProxyProtocolMixedMode() { return enableProxyProtocolMixedMode; } @Override public boolean sharedStringRepoNoReclaim() { return sharedStringRepoNoReclaim; } @Override public int mbusJavaRpcNumTargets() { return mbus_java_num_targets; } @Override public int mbusJavaEventsBeforeWakeup() { return mbus_java_events_before_wakeup; } @Override public int mbusCppRpcNumTargets() { return mbus_cpp_num_targets; } @Override public int mbusCppEventsBeforeWakeup() { return mbus_cpp_events_before_wakeup; } @Override public int rpcNumTargets() { return rpc_num_targets; } @Override public int rpcEventsBeforeWakeup() { return rpc_events_before_wakeup; } @Override public String logFileCompressionAlgorithm(String defVal) { var fflag = this.logFileCompressionAlgorithm; if (fflag != null && ! 
fflag.equals("")) { return fflag; } return defVal; } @Override public boolean useRestrictedDataPlaneBindings() { return useRestrictedDataPlaneBindings; } @Override public boolean enableGlobalPhase() { return enableGlobalPhase; } @Override public boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return allowMoreThanOneContentGroupDown.test(id); } private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private static <V> V flagValue(FlagSource source, TenantName tenant, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.TENANT_ID, tenant.value()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, ClusterSpec.Type clusterType, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, clusterType.name()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, ClusterSpec.Id clusterId, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .with(FetchVector.Dimension.CLUSTER_ID, clusterId.value()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private String translateJvmOmitStackTraceInFastThrowToString(Predicate<ClusterSpec.Type> function, ClusterSpec.Type clusterType) { return function.test(clusterType) ? "" : "-XX:-OmitStackTraceInFastThrow"; } }
Yeah, why not. Fixed
// Resolves every feature flag exactly once, bound to (application id, vespa version).
// Cluster-scoped flags (jvmOmitStackTraceInFastThrow, allowMoreThanOneContentGroupDown)
// are captured as deferred functions so the cluster type/id dimension can be supplied
// later; here their boolean values are encoded as ints via flagValueAsInt (0 = false, 1 = true).
public FeatureFlags(FlagSource source, ApplicationId appId, Version version) { this.defaultTermwiseLimit = flagValue(source, appId, version, Flags.DEFAULT_TERM_WISE_LIMIT); this.feedSequencer = flagValue(source, appId, version, Flags.FEED_SEQUENCER_TYPE); this.responseSequencer = flagValue(source, appId, version, Flags.RESPONSE_SEQUENCER_TYPE); this.numResponseThreads = flagValue(source, appId, version, Flags.RESPONSE_NUM_THREADS); this.skipCommunicationManagerThread = flagValue(source, appId, version, Flags.SKIP_COMMUNICATIONMANAGER_THREAD); this.skipMbusRequestThread = flagValue(source, appId, version, Flags.SKIP_MBUS_REQUEST_THREAD); this.skipMbusReplyThread = flagValue(source, appId, version, Flags.SKIP_MBUS_REPLY_THREAD); this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, version, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE); this.feedConcurrency = flagValue(source, appId, version, Flags.FEED_CONCURRENCY); this.feedNiceness = flagValue(source, appId, version, Flags.FEED_NICENESS); this.mbus_network_threads = flagValue(source, appId, version, Flags.MBUS_NUM_NETWORK_THREADS); this.allowedAthenzProxyIdentities = flagValue(source, appId, version, Flags.ALLOWED_ATHENZ_PROXY_IDENTITIES); this.maxActivationInhibitedOutOfSyncGroups = flagValue(source, appId, version, Flags.MAX_ACTIVATION_INHIBITED_OUT_OF_SYNC_GROUPS); this.jvmOmitStackTraceInFastThrow = type -> flagValueAsInt(source, appId, version, type, PermanentFlags.JVM_OMIT_STACK_TRACE_IN_FAST_THROW); this.resourceLimitDisk = flagValue(source, appId, version, PermanentFlags.RESOURCE_LIMIT_DISK); this.resourceLimitMemory = flagValue(source, appId, version, PermanentFlags.RESOURCE_LIMIT_MEMORY); this.minNodeRatioPerGroup = flagValue(source, appId, version, Flags.MIN_NODE_RATIO_PER_GROUP); this.containerDumpHeapOnShutdownTimeout = flagValue(source, appId, version, Flags.CONTAINER_DUMP_HEAP_ON_SHUTDOWN_TIMEOUT); this.loadCodeAsHugePages = flagValue(source, appId, version, 
Flags.LOAD_CODE_AS_HUGEPAGES); this.containerShutdownTimeout = flagValue(source, appId, version, Flags.CONTAINER_SHUTDOWN_TIMEOUT); this.maxUnCommittedMemory = flagValue(source, appId, version, Flags.MAX_UNCOMMITTED_MEMORY); this.forwardIssuesAsErrors = flagValue(source, appId, version, PermanentFlags.FORWARD_ISSUES_AS_ERRORS); this.ignoreThreadStackSizes = flagValue(source, appId, version, Flags.IGNORE_THREAD_STACK_SIZES); this.useV8GeoPositions = flagValue(source, appId, version, Flags.USE_V8_GEO_POSITIONS); this.maxCompactBuffers = flagValue(source, appId, version, Flags.MAX_COMPACT_BUFFERS); this.ignoredHttpUserAgents = flagValue(source, appId, version, PermanentFlags.IGNORED_HTTP_USER_AGENTS); this.useQrserverServiceName = flagValue(source, appId, version, Flags.USE_QRSERVER_SERVICE_NAME); this.avoidRenamingSummaryFeatures = flagValue(source, appId, version, Flags.AVOID_RENAMING_SUMMARY_FEATURES); this.adminClusterArchitecture = Architecture.valueOf(flagValue(source, appId, version, PermanentFlags.ADMIN_CLUSTER_NODE_ARCHITECTURE)); this.enableProxyProtocolMixedMode = flagValue(source, appId, version, Flags.ENABLE_PROXY_PROTOCOL_MIXED_MODE); this.sharedStringRepoNoReclaim = flagValue(source, appId, version, Flags.SHARED_STRING_REPO_NO_RECLAIM); this.logFileCompressionAlgorithm = flagValue(source, appId, version, Flags.LOG_FILE_COMPRESSION_ALGORITHM); this.mbus_java_num_targets = flagValue(source, appId, version, Flags.MBUS_JAVA_NUM_TARGETS); this.mbus_java_events_before_wakeup = flagValue(source, appId, version, Flags.MBUS_JAVA_EVENTS_BEFORE_WAKEUP); this.mbus_cpp_num_targets = flagValue(source, appId, version, Flags.MBUS_CPP_NUM_TARGETS); this.mbus_cpp_events_before_wakeup = flagValue(source, appId, version, Flags.MBUS_CPP_EVENTS_BEFORE_WAKEUP); this.rpc_num_targets = flagValue(source, appId, version, Flags.RPC_NUM_TARGETS); this.rpc_events_before_wakeup = flagValue(source, appId, version, Flags.RPC_EVENTS_BEFORE_WAKEUP); this.queryDispatchPolicy = 
flagValue(source, appId, version, Flags.QUERY_DISPATCH_POLICY); this.queryDispatchWarmup = flagValue(source, appId, version, PermanentFlags.QUERY_DISPATCH_WARMUP); this.useRestrictedDataPlaneBindings = flagValue(source, appId, version, Flags.RESTRICT_DATA_PLANE_BINDINGS); this.heapPercentage = flagValue(source, appId, version, PermanentFlags.HEAP_SIZE_PERCENTAGE); this.enableGlobalPhase = flagValue(source, appId, version, Flags.ENABLE_GLOBAL_PHASE); this.summaryDecodePolicy = flagValue(source, appId, version, Flags.SUMMARY_DECODE_POLICY); this.allowMoreThanOneContentGroupDown = clusterId -> flagValueAsInt(source, appId, version, clusterId, Flags.ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN); }
this.allowMoreThanOneContentGroupDown = clusterId -> flagValueAsInt(source, appId, version, clusterId, Flags.ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN);
// Resolves every feature flag exactly once, bound to (application id, vespa version).
// Cluster-scoped boolean flags (jvmOmitStackTraceInFastThrow, allowMoreThanOneContentGroupDown)
// are captured as deferred Predicates so the cluster type/id dimension can be supplied later.
public FeatureFlags(FlagSource source, ApplicationId appId, Version version) { this.defaultTermwiseLimit = flagValue(source, appId, version, Flags.DEFAULT_TERM_WISE_LIMIT); this.feedSequencer = flagValue(source, appId, version, Flags.FEED_SEQUENCER_TYPE); this.responseSequencer = flagValue(source, appId, version, Flags.RESPONSE_SEQUENCER_TYPE); this.numResponseThreads = flagValue(source, appId, version, Flags.RESPONSE_NUM_THREADS); this.skipCommunicationManagerThread = flagValue(source, appId, version, Flags.SKIP_COMMUNICATIONMANAGER_THREAD); this.skipMbusRequestThread = flagValue(source, appId, version, Flags.SKIP_MBUS_REQUEST_THREAD); this.skipMbusReplyThread = flagValue(source, appId, version, Flags.SKIP_MBUS_REPLY_THREAD); this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, version, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE); this.feedConcurrency = flagValue(source, appId, version, Flags.FEED_CONCURRENCY); this.feedNiceness = flagValue(source, appId, version, Flags.FEED_NICENESS); this.mbus_network_threads = flagValue(source, appId, version, Flags.MBUS_NUM_NETWORK_THREADS); this.allowedAthenzProxyIdentities = flagValue(source, appId, version, Flags.ALLOWED_ATHENZ_PROXY_IDENTITIES); this.maxActivationInhibitedOutOfSyncGroups = flagValue(source, appId, version, Flags.MAX_ACTIVATION_INHIBITED_OUT_OF_SYNC_GROUPS); this.jvmOmitStackTraceInFastThrow = type -> flagValue(source, appId, version, type, PermanentFlags.JVM_OMIT_STACK_TRACE_IN_FAST_THROW); this.resourceLimitDisk = flagValue(source, appId, version, PermanentFlags.RESOURCE_LIMIT_DISK); this.resourceLimitMemory = flagValue(source, appId, version, PermanentFlags.RESOURCE_LIMIT_MEMORY); this.minNodeRatioPerGroup = flagValue(source, appId, version, Flags.MIN_NODE_RATIO_PER_GROUP); this.containerDumpHeapOnShutdownTimeout = flagValue(source, appId, version, Flags.CONTAINER_DUMP_HEAP_ON_SHUTDOWN_TIMEOUT); this.loadCodeAsHugePages = flagValue(source, appId, version, Flags.LOAD_CODE_AS_HUGEPAGES); 
this.containerShutdownTimeout = flagValue(source, appId, version, Flags.CONTAINER_SHUTDOWN_TIMEOUT); this.maxUnCommittedMemory = flagValue(source, appId, version, Flags.MAX_UNCOMMITTED_MEMORY); this.forwardIssuesAsErrors = flagValue(source, appId, version, PermanentFlags.FORWARD_ISSUES_AS_ERRORS); this.ignoreThreadStackSizes = flagValue(source, appId, version, Flags.IGNORE_THREAD_STACK_SIZES); this.useV8GeoPositions = flagValue(source, appId, version, Flags.USE_V8_GEO_POSITIONS); this.maxCompactBuffers = flagValue(source, appId, version, Flags.MAX_COMPACT_BUFFERS); this.ignoredHttpUserAgents = flagValue(source, appId, version, PermanentFlags.IGNORED_HTTP_USER_AGENTS); this.useQrserverServiceName = flagValue(source, appId, version, Flags.USE_QRSERVER_SERVICE_NAME); this.avoidRenamingSummaryFeatures = flagValue(source, appId, version, Flags.AVOID_RENAMING_SUMMARY_FEATURES); this.adminClusterArchitecture = Architecture.valueOf(flagValue(source, appId, version, PermanentFlags.ADMIN_CLUSTER_NODE_ARCHITECTURE)); this.enableProxyProtocolMixedMode = flagValue(source, appId, version, Flags.ENABLE_PROXY_PROTOCOL_MIXED_MODE); this.sharedStringRepoNoReclaim = flagValue(source, appId, version, Flags.SHARED_STRING_REPO_NO_RECLAIM); this.logFileCompressionAlgorithm = flagValue(source, appId, version, Flags.LOG_FILE_COMPRESSION_ALGORITHM); this.mbus_java_num_targets = flagValue(source, appId, version, Flags.MBUS_JAVA_NUM_TARGETS); this.mbus_java_events_before_wakeup = flagValue(source, appId, version, Flags.MBUS_JAVA_EVENTS_BEFORE_WAKEUP); this.mbus_cpp_num_targets = flagValue(source, appId, version, Flags.MBUS_CPP_NUM_TARGETS); this.mbus_cpp_events_before_wakeup = flagValue(source, appId, version, Flags.MBUS_CPP_EVENTS_BEFORE_WAKEUP); this.rpc_num_targets = flagValue(source, appId, version, Flags.RPC_NUM_TARGETS); this.rpc_events_before_wakeup = flagValue(source, appId, version, Flags.RPC_EVENTS_BEFORE_WAKEUP); this.queryDispatchPolicy = flagValue(source, appId, version, 
Flags.QUERY_DISPATCH_POLICY); this.queryDispatchWarmup = flagValue(source, appId, version, PermanentFlags.QUERY_DISPATCH_WARMUP); this.useRestrictedDataPlaneBindings = flagValue(source, appId, version, Flags.RESTRICT_DATA_PLANE_BINDINGS); this.heapPercentage = flagValue(source, appId, version, PermanentFlags.HEAP_SIZE_PERCENTAGE); this.enableGlobalPhase = flagValue(source, appId, version, Flags.ENABLE_GLOBAL_PHASE); this.summaryDecodePolicy = flagValue(source, appId, version, Flags.SUMMARY_DECODE_POLICY); this.allowMoreThanOneContentGroupDown = clusterId -> flagValue(source, appId, version, clusterId, Flags.ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN); }
// Immutable snapshot of all feature-flag values for one application at one Vespa version.
// Cluster-scoped boolean flags are encoded as ToIntFunction (0 = false, 1 = true) and
// translated back to booleans/strings at the accessor level.
// NOTE(review): Predicate<ClusterSpec.Type/Id> would express these boolean flags more
// directly than the int encoding — consider migrating (requires matching constructor change).
class FeatureFlags implements ModelContext.FeatureFlags { private final String queryDispatchPolicy; private final double queryDispatchWarmup; private final double defaultTermwiseLimit; private final String feedSequencer; private final String responseSequencer; private final int numResponseThreads; private final boolean skipCommunicationManagerThread; private final boolean skipMbusRequestThread; private final boolean skipMbusReplyThread; private final boolean useAsyncMessageHandlingOnSchedule; private final double feedConcurrency; private final double feedNiceness; private final List<String> allowedAthenzProxyIdentities; private final int maxActivationInhibitedOutOfSyncGroups; private final ToIntFunction<ClusterSpec.Type> jvmOmitStackTraceInFastThrow; private final double resourceLimitDisk; private final double resourceLimitMemory; private final double minNodeRatioPerGroup; private final boolean containerDumpHeapOnShutdownTimeout; private final boolean loadCodeAsHugePages; private final double containerShutdownTimeout; private final int maxUnCommittedMemory; private final boolean forwardIssuesAsErrors; private final boolean ignoreThreadStackSizes; private final boolean useV8GeoPositions; private final int maxCompactBuffers; private final List<String> ignoredHttpUserAgents; private final boolean useQrserverServiceName; private final boolean avoidRenamingSummaryFeatures; private final Architecture adminClusterArchitecture; private final boolean enableProxyProtocolMixedMode; private final boolean sharedStringRepoNoReclaim; private final String logFileCompressionAlgorithm; private final int mbus_network_threads; private final int mbus_java_num_targets; private final int mbus_java_events_before_wakeup; private final int mbus_cpp_num_targets; private final int mbus_cpp_events_before_wakeup; private final int rpc_num_targets; private final int rpc_events_before_wakeup; private final boolean useRestrictedDataPlaneBindings; private final int heapPercentage; private final 
boolean enableGlobalPhase; private final String summaryDecodePolicy; private final ToIntFunction<ClusterSpec.Id> allowMoreThanOneContentGroupDown; @Override public int heapSizePercentage() { return heapPercentage; } @Override public String queryDispatchPolicy() { return queryDispatchPolicy; } @Override public double queryDispatchWarmup() { return queryDispatchWarmup; } @Override public String summaryDecodePolicy() { return summaryDecodePolicy; } @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @Override public String feedSequencerType() { return feedSequencer; } @Override public String responseSequencerType() { return responseSequencer; } @Override public int defaultNumResponseThreads() { return numResponseThreads; } @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; } @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; } @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; } @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; } @Override public double feedConcurrency() { return feedConcurrency; } @Override public double feedNiceness() { return feedNiceness; } @Override public int mbusNetworkThreads() { return mbus_network_threads; } @Override public List<String> allowedAthenzProxyIdentities() { return allowedAthenzProxyIdentities; } @Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; } @Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return translateJvmOmitStackTraceInFastThrowIntToString(jvmOmitStackTraceInFastThrow, type); } @Override public double resourceLimitDisk() { return resourceLimitDisk; } @Override public double resourceLimitMemory() { return resourceLimitMemory; } @Override public double minNodeRatioPerGroup() { return minNodeRatioPerGroup; } @Override public double 
containerShutdownTimeout() { return containerShutdownTimeout; } @Override public boolean containerDumpHeapOnShutdownTimeout() { return containerDumpHeapOnShutdownTimeout; } @Override public boolean loadCodeAsHugePages() { return loadCodeAsHugePages; } @Override public int maxUnCommittedMemory() { return maxUnCommittedMemory; } @Override public boolean forwardIssuesAsErrors() { return forwardIssuesAsErrors; } @Override public boolean ignoreThreadStackSizes() { return ignoreThreadStackSizes; } @Override public boolean useV8GeoPositions() { return useV8GeoPositions; } @Override public int maxCompactBuffers() { return maxCompactBuffers; } @Override public List<String> ignoredHttpUserAgents() { return ignoredHttpUserAgents; } @Override public boolean useQrserverServiceName() { return useQrserverServiceName; } @Override public boolean avoidRenamingSummaryFeatures() { return avoidRenamingSummaryFeatures; } @Override public Architecture adminClusterArchitecture() { return adminClusterArchitecture; } @Override public boolean enableProxyProtocolMixedMode() { return enableProxyProtocolMixedMode; } @Override public boolean sharedStringRepoNoReclaim() { return sharedStringRepoNoReclaim; } @Override public int mbusJavaRpcNumTargets() { return mbus_java_num_targets; } @Override public int mbusJavaEventsBeforeWakeup() { return mbus_java_events_before_wakeup; } @Override public int mbusCppRpcNumTargets() { return mbus_cpp_num_targets; } @Override public int mbusCppEventsBeforeWakeup() { return mbus_cpp_events_before_wakeup; } @Override public int rpcNumTargets() { return rpc_num_targets; } @Override public int rpcEventsBeforeWakeup() { return rpc_events_before_wakeup; } @Override public String logFileCompressionAlgorithm(String defVal) { var fflag = this.logFileCompressionAlgorithm; if (fflag != null && ! 
fflag.equals("")) { return fflag; } return defVal; } @Override public boolean useRestrictedDataPlaneBindings() { return useRestrictedDataPlaneBindings; } @Override public boolean enableGlobalPhase() { return enableGlobalPhase; } @Override public boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return allowMoreThanOneContentGroupDown.applyAsInt(id) != 0; } private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private static <V> V flagValue(FlagSource source, TenantName tenant, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.TENANT_ID, tenant.value()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, ClusterSpec.Type clusterType, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, clusterType.name()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, ClusterSpec.Id clusterId, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .with(FetchVector.Dimension.CLUSTER_ID, clusterId.value()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } static int flagValueAsInt(FlagSource source, ApplicationId appId, Version version, ClusterSpec.Type clusterType, UnboundFlag<? 
extends Boolean, ?, ?> flag) { return flagValue(source, appId, version, clusterType, flag) ? 1 : 0; } static int flagValueAsInt(FlagSource source, ApplicationId appId, Version version, ClusterSpec.Id clusterId, UnboundFlag<? extends Boolean, ?, ?> flag) { return flagValue(source, appId, version, clusterId, flag) ? 1 : 0; } private String translateJvmOmitStackTraceInFastThrowIntToString(ToIntFunction<ClusterSpec.Type> function, ClusterSpec.Type clusterType) { return function.applyAsInt(clusterType) == 1 ? "" : "-XX:-OmitStackTraceInFastThrow"; } }
// Immutable snapshot of all feature-flag values for one application at one Vespa version.
// Flag values are read once (constructor not shown in this fragment) into final fields.
// Boolean-valued, cluster-scoped flags are modelled as Predicate<ClusterSpec.Type/Id>
// and evaluated lazily so the cluster dimension can be supplied at query time.
class FeatureFlags implements ModelContext.FeatureFlags { private final String queryDispatchPolicy; private final double queryDispatchWarmup; private final double defaultTermwiseLimit; private final String feedSequencer; private final String responseSequencer; private final int numResponseThreads; private final boolean skipCommunicationManagerThread; private final boolean skipMbusRequestThread; private final boolean skipMbusReplyThread; private final boolean useAsyncMessageHandlingOnSchedule; private final double feedConcurrency; private final double feedNiceness; private final List<String> allowedAthenzProxyIdentities; private final int maxActivationInhibitedOutOfSyncGroups; private final Predicate<ClusterSpec.Type> jvmOmitStackTraceInFastThrow; private final double resourceLimitDisk; private final double resourceLimitMemory; private final double minNodeRatioPerGroup; private final boolean containerDumpHeapOnShutdownTimeout; private final boolean loadCodeAsHugePages; private final double containerShutdownTimeout; private final int maxUnCommittedMemory; private final boolean forwardIssuesAsErrors; private final boolean ignoreThreadStackSizes; private final boolean useV8GeoPositions; private final int maxCompactBuffers; private final List<String> ignoredHttpUserAgents; private final boolean useQrserverServiceName; private final boolean avoidRenamingSummaryFeatures; private final Architecture adminClusterArchitecture; private final boolean enableProxyProtocolMixedMode; private final boolean sharedStringRepoNoReclaim; private final String logFileCompressionAlgorithm; private final int mbus_network_threads; private final int mbus_java_num_targets; private final int mbus_java_events_before_wakeup; private final int mbus_cpp_num_targets; private final int mbus_cpp_events_before_wakeup; private final int rpc_num_targets; private final int rpc_events_before_wakeup; private final boolean useRestrictedDataPlaneBindings; private final int heapPercentage; private final boolean 
enableGlobalPhase; private final String summaryDecodePolicy; private final Predicate<ClusterSpec.Id> allowMoreThanOneContentGroupDown; @Override public int heapSizePercentage() { return heapPercentage; } @Override public String queryDispatchPolicy() { return queryDispatchPolicy; } @Override public double queryDispatchWarmup() { return queryDispatchWarmup; } @Override public String summaryDecodePolicy() { return summaryDecodePolicy; } @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @Override public String feedSequencerType() { return feedSequencer; } @Override public String responseSequencerType() { return responseSequencer; } @Override public int defaultNumResponseThreads() { return numResponseThreads; } @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; } @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; } @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; } @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; } @Override public double feedConcurrency() { return feedConcurrency; } @Override public double feedNiceness() { return feedNiceness; } @Override public int mbusNetworkThreads() { return mbus_network_threads; } @Override public List<String> allowedAthenzProxyIdentities() { return allowedAthenzProxyIdentities; } @Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; } @Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return translateJvmOmitStackTraceInFastThrowToString(jvmOmitStackTraceInFastThrow, type); } @Override public double resourceLimitDisk() { return resourceLimitDisk; } @Override public double resourceLimitMemory() { return resourceLimitMemory; } @Override public double minNodeRatioPerGroup() { return minNodeRatioPerGroup; } @Override public double containerShutdownTimeout() { 
return containerShutdownTimeout; } @Override public boolean containerDumpHeapOnShutdownTimeout() { return containerDumpHeapOnShutdownTimeout; } @Override public boolean loadCodeAsHugePages() { return loadCodeAsHugePages; } @Override public int maxUnCommittedMemory() { return maxUnCommittedMemory; } @Override public boolean forwardIssuesAsErrors() { return forwardIssuesAsErrors; } @Override public boolean ignoreThreadStackSizes() { return ignoreThreadStackSizes; } @Override public boolean useV8GeoPositions() { return useV8GeoPositions; } @Override public int maxCompactBuffers() { return maxCompactBuffers; } @Override public List<String> ignoredHttpUserAgents() { return ignoredHttpUserAgents; } @Override public boolean useQrserverServiceName() { return useQrserverServiceName; } @Override public boolean avoidRenamingSummaryFeatures() { return avoidRenamingSummaryFeatures; } @Override public Architecture adminClusterArchitecture() { return adminClusterArchitecture; } @Override public boolean enableProxyProtocolMixedMode() { return enableProxyProtocolMixedMode; } @Override public boolean sharedStringRepoNoReclaim() { return sharedStringRepoNoReclaim; } @Override public int mbusJavaRpcNumTargets() { return mbus_java_num_targets; } @Override public int mbusJavaEventsBeforeWakeup() { return mbus_java_events_before_wakeup; } @Override public int mbusCppRpcNumTargets() { return mbus_cpp_num_targets; } @Override public int mbusCppEventsBeforeWakeup() { return mbus_cpp_events_before_wakeup; } @Override public int rpcNumTargets() { return rpc_num_targets; } @Override public int rpcEventsBeforeWakeup() { return rpc_events_before_wakeup; } @Override public String logFileCompressionAlgorithm(String defVal) { var fflag = this.logFileCompressionAlgorithm; if (fflag != null && ! 
fflag.equals("")) { return fflag; } return defVal; } @Override public boolean useRestrictedDataPlaneBindings() { return useRestrictedDataPlaneBindings; } @Override public boolean enableGlobalPhase() { return enableGlobalPhase; } @Override public boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return allowMoreThanOneContentGroupDown.test(id); } private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private static <V> V flagValue(FlagSource source, TenantName tenant, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.TENANT_ID, tenant.value()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, ClusterSpec.Type clusterType, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .with(FetchVector.Dimension.CLUSTER_TYPE, clusterType.name()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, ClusterSpec.Id clusterId, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .with(FetchVector.Dimension.CLUSTER_ID, clusterId.value()) .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString()) .boxedValue(); } private String translateJvmOmitStackTraceInFastThrowToString(Predicate<ClusterSpec.Type> function, ClusterSpec.Type clusterType) { return function.test(clusterType) ? "" : "-XX:-OmitStackTraceInFastThrow"; } }
Will be handled when adding code to configure this in services.xml (there is no easy way of checking config overrides for a specific config today)
/**
 * Creates a checker for node state change requests against the given content cluster.
 *
 * @param cluster      the content cluster; supplies redundancy, distribution (group topology),
 *                     cluster info, and the max-groups-allowed-down setting
 * @param inMoratorium true while the master cluster controller is bootstrapping; safe state
 *                     changes are rejected in that period
 * @throws IllegalArgumentException if the distribution is flat (effectively a single group)
 *                                  while maxNumberOfGroupsAllowedToBeDown > 1
 */
public NodeStateChangeChecker(ContentCluster cluster, boolean inMoratorium) {
    this.requiredRedundancy = cluster.getDistribution().getRedundancy();
    this.groupVisiting = new HierarchicalGroupVisiting(cluster.getDistribution());
    this.clusterInfo = cluster.clusterInfo();
    this.inMoratorium = inMoratorium;
    this.maxNumberOfGroupsAllowedToBeDown = cluster.maxNumberOfGroupsAllowedToBeDown();
    // A non-hierarchical distribution has only one (implicit) group, so allowing more than
    // one group down at a time is contradictory.
    if ( ! groupVisiting.isHierarchical() && maxNumberOfGroupsAllowedToBeDown > 1)
        throw new IllegalArgumentException("Cannot have both 1 group and maxNumberOfGroupsAllowedToBeDown > 1");
}
if ( ! groupVisiting.isHierarchical() && maxNumberOfGroupsAllowedToBeDown > 1)
/**
 * Constructs the checker from the cluster's configuration.
 *
 * @param cluster      source of required redundancy, group distribution, cluster info and
 *                     the maxNumberOfGroupsAllowedToBeDown setting
 * @param inMoratorium whether the master cluster controller is still bootstrapping (while
 *                     true, safe-condition state changes are refused)
 * @throws IllegalArgumentException when the topology is flat (single group) but
 *                                  maxNumberOfGroupsAllowedToBeDown > 1 was configured
 */
public NodeStateChangeChecker(ContentCluster cluster, boolean inMoratorium) {
    this.requiredRedundancy = cluster.getDistribution().getRedundancy();
    this.groupVisiting = new HierarchicalGroupVisiting(cluster.getDistribution());
    this.clusterInfo = cluster.clusterInfo();
    this.inMoratorium = inMoratorium;
    this.maxNumberOfGroupsAllowedToBeDown = cluster.maxNumberOfGroupsAllowedToBeDown();
    // Flat distribution means effectively one group; a limit above 1 cannot be honored.
    if ( ! groupVisiting.isHierarchical() && maxNumberOfGroupsAllowedToBeDown > 1)
        throw new IllegalArgumentException("Cannot have both 1 group and maxNumberOfGroupsAllowedToBeDown > 1");
}
/**
 * Decides whether a requested wanted-state change for a content-cluster node may be carried
 * out, given the request condition (FORCE always allows, SAFE applies the checks below) and
 * the current cluster state. Safe mode only supports storage nodes and the target states
 * UP, MAINTENANCE and (for retired, bucket-free nodes) DOWN.
 */
class NodeStateChangeChecker {

    private static final Logger log = Logger.getLogger(NodeStateChangeChecker.class.getName());
    // Metric consulted to verify a retired storage node manages zero buckets before it may
    // be set permanently DOWN.
    private static final String BUCKETS_METRIC_NAME = "vds.datastored.bucket_space.buckets_total";
    private static final Map<String, String> BUCKETS_METRIC_DIMENSIONS = Map.of("bucketSpace", "default");

    private final int requiredRedundancy;                  // minimum acceptable bucket replication factor
    private final HierarchicalGroupVisiting groupVisiting; // visits groups of a hierarchical distribution
    private final ClusterInfo clusterInfo;
    private final boolean inMoratorium;                    // true while the master controller is bootstrapping
    private final int maxNumberOfGroupsAllowedToBeDown;    // -1 selects the legacy one-node/group-at-a-time path

    /** Outcome of evaluating a state change: must set it, already in effect, or disallowed (with reason). */
    public static class Result {

        public enum Action {
            MUST_SET_WANTED_STATE,
            ALREADY_SET,
            DISALLOWED
        }

        private final Action action;
        private final String reason;

        private Result(Action action, String reason) {
            this.action = action;
            this.reason = reason;
        }

        public static Result createDisallowed(String reason) {
            return new Result(Action.DISALLOWED, reason);
        }

        public static Result allowSettingOfWantedState() {
            return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
        }

        public static Result createAlreadySet() {
            return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
        }

        public boolean settingWantedStateIsAllowed() {
            return action == Action.MUST_SET_WANTED_STATE;
        }

        public boolean wantedStateAlreadySet() {
            return action == Action.ALREADY_SET;
        }

        public String getReason() {
            return reason;
        }

        public String toString() {
            return "action " + action + ": " + reason;
        }
    }

    /**
     * Evaluates whether {@code node} may transition from {@code oldWantedState} to
     * {@code newWantedState}. FORCE short-circuits to allowed; SAFE dispatches to the
     * per-target-state checks; any other condition is rejected.
     */
    public Result evaluateTransition(
            Node node, ClusterState clusterState, SetUnitStateRequest.Condition condition,
            NodeState oldWantedState, NodeState newWantedState) {
        if (condition == FORCE) {
            return allowSettingOfWantedState();
        }

        if (inMoratorium) {
            return createDisallowed("Master cluster controller is bootstrapping and in moratorium");
        }

        if (condition != SAFE) {
            return createDisallowed("Condition not implemented: " + condition.name());
        }

        if (node.getType() != STORAGE) {
            return createDisallowed("Safe-set of node state is only supported for storage nodes! " +
                    "Requested node type: " + node.getType().toString());
        }

        StorageNodeInfo nodeInfo = clusterInfo.getStorageNodeInfo(node.getIndex());
        if (nodeInfo == null) {
            return createDisallowed("Unknown node " + node);
        }

        // Same state and same description as what is already wanted: nothing to change.
        if (newWantedState.getState().equals(oldWantedState.getState()) &&
            Objects.equals(newWantedState.getDescription(), oldWantedState.getDescription())) {
            return createAlreadySet();
        }

        return switch (newWantedState.getState()) {
            case UP -> canSetStateUp(nodeInfo, oldWantedState);
            case MAINTENANCE -> canSetStateMaintenanceTemporarily(nodeInfo, clusterState, newWantedState.getDescription());
            case DOWN -> canSetStateDownPermanently(nodeInfo, clusterState, newWantedState.getDescription());
            default -> createDisallowed("Destination node state unsupported in safe mode: " + newWantedState);
        };
    }

    /**
     * A node may be set permanently DOWN in safe mode only if: no conflicting wanted state is
     * set, the node reports UP (so its bucket metrics are available), it is RETIRED in the
     * cluster state, its host info matches the controller's cluster state version, and the
     * buckets metric shows it manages zero buckets.
     */
    private Result canSetStateDownPermanently(NodeInfo nodeInfo, ClusterState clusterState, String newDescription) {
        NodeState oldWantedState = nodeInfo.getUserWantedState();
        if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) {
            return createDisallowed("A conflicting wanted state is already set: " +
                    oldWantedState.getState() + ": " + oldWantedState.getDescription());
        }

        State reportedState = nodeInfo.getReportedState().getState();
        if (reportedState != UP) {
            return createDisallowed("Reported state (" + reportedState +
                    ") is not UP, so no bucket data is available");
        }

        State currentState = clusterState.getNodeState(nodeInfo.getNode()).getState();
        if (currentState != RETIRED) {
            return createDisallowed("Only retired nodes are allowed to be set to DOWN in safe mode - is " +
                    currentState);
        }

        HostInfo hostInfo = nodeInfo.getHostInfo();
        Integer hostInfoNodeVersion = hostInfo.getClusterStateVersionOrNull();
        int clusterControllerVersion = clusterState.getVersion();
        // Host info from an older cluster state version could carry stale metrics.
        if (hostInfoNodeVersion == null || hostInfoNodeVersion != clusterControllerVersion) {
            return createDisallowed("Cluster controller at version " + clusterControllerVersion +
                    " got info for storage node " + nodeInfo.getNodeIndex() +
                    " at a different version " + hostInfoNodeVersion);
        }

        Optional<Metrics.Value> bucketsMetric;
        bucketsMetric = hostInfo.getMetrics().getValueAt(BUCKETS_METRIC_NAME, BUCKETS_METRIC_DIMENSIONS);
        if (bucketsMetric.isEmpty() || bucketsMetric.get().getLast() == null) {
            return createDisallowed("Missing last value of the " + BUCKETS_METRIC_NAME +
                    " metric for storage node " + nodeInfo.getNodeIndex());
        }

        long lastBuckets = bucketsMetric.get().getLast();
        if (lastBuckets > 0) {
            return createDisallowed("The storage node manages " + lastBuckets + " buckets");
        }

        return allowSettingOfWantedState();
    }

    /** Allows setting wanted state UP only when the node also reports UP. */
    private Result canSetStateUp(NodeInfo nodeInfo, NodeState oldWantedState) {
        if (oldWantedState.getState() == UP) {
            return createAlreadySet();
        }

        if (nodeInfo.getReportedState().getState() != UP) {
            return createDisallowed("Refuse to set wanted state to UP, " +
                    "since the reported state is not UP (" +
                    nodeInfo.getReportedState().getState() + ")");
        }

        return allowSettingOfWantedState();
    }

    /**
     * Checks whether the node may be put in MAINTENANCE: no conflicting wanted state; a node
     * already DOWN in the cluster state is always allowed; otherwise the per-group suspension
     * limits, the all-nodes-up check, and the distributor checks must all pass.
     */
    private Result canSetStateMaintenanceTemporarily(StorageNodeInfo nodeInfo, ClusterState clusterState,
                                                     String newDescription) {
        NodeState oldWantedState = nodeInfo.getUserWantedState();
        if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) {
            return createDisallowed("A conflicting wanted state is already set: " +
                    oldWantedState.getState() + ": " + oldWantedState.getDescription());
        }

        // A node that is already DOWN in the cluster state can safely be put in MAINTENANCE.
        if (clusterState.getNodeState(nodeInfo.getNode()).getState() == DOWN) {
            log.log(FINE, "node is DOWN, allow");
            return allowSettingOfWantedState();
        }

        if (maxNumberOfGroupsAllowedToBeDown == -1) {
            // Legacy path: at most one node (or one group, if hierarchical) suspended at a time.
            var otherGroupCheck = anotherNodeInAnotherGroupHasWantedState(nodeInfo);
            if (!otherGroupCheck.settingWantedStateIsAllowed()) {
                return otherGroupCheck;
            }
            if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription)) {
                return allowSettingOfWantedState();
            }
        } else {
            var result = otherNodesHaveWantedState(nodeInfo, newDescription);
            if (result.isPresent())
                return result.get();
        }

        Result allNodesAreUpCheck = checkAllNodesAreUp(clusterState);
        if (!allNodesAreUpCheck.settingWantedStateIsAllowed()) {
            log.log(FINE, "allNodesAreUpCheck: " + allNodesAreUpCheck);
            return allNodesAreUpCheck;
        }

        Result checkDistributorsResult = checkDistributors(nodeInfo.getNode(), clusterState.getVersion());
        if (!checkDistributorsResult.settingWantedStateIsAllowed()) {
            log.log(FINE, "checkDistributors: "+ checkDistributorsResult);
            return checkDistributorsResult;
        }

        return allowSettingOfWantedState();
    }

    /**
     * Returns a disallow-result if there is another node (in another group, if hierarchical)
     * that has a wanted state != UP. We disallow more than 1 suspended node/group at a time.
     */
    private Result anotherNodeInAnotherGroupHasWantedState(StorageNodeInfo nodeInfo) {
        if (groupVisiting.isHierarchical()) {
            SettableOptional<Result> anotherNodeHasWantedState = new SettableOptional<>();
            groupVisiting.visit(group -> {
                if (!groupContainsNode(group, nodeInfo.getNode())) {
                    Result result = otherNodeInGroupHasWantedState(group);
                    if (!result.settingWantedStateIsAllowed()) {
                        anotherNodeHasWantedState.set(result);
                        return false;  // found a conflict; stop visiting
                    }
                }
                return true;  // keep visiting
            });
            return anotherNodeHasWantedState.asOptional().orElseGet(Result::allowSettingOfWantedState);
        } else {
            // Flat cluster: check every other configured node.
            return otherNodeHasWantedState(nodeInfo);
        }
    }

    /**
     * Returns an optional Result, where return value is:
     * For flat setup: Return Optional.of(disallowed) if wanted state is set on some node, else Optional.empty
     * For hierarchical setup: No wanted state for other nodes, return Optional.empty
     *   Wanted state for nodes/groups are not UP:
     *     if less than maxNumberOfGroupsAllowedToBeDown: return Optional.of(allowed)
     *     else: if node is in group with nodes already down: return Optional.of(allowed), else Optional.of(disallowed)
     */
    private Optional<Result> otherNodesHaveWantedState(StorageNodeInfo nodeInfo, String newDescription) {
        Node node = nodeInfo.getNode();
        if (groupVisiting.isHierarchical()) {
            if (maxNumberOfGroupsAllowedToBeDown <= 1) {
                // Same one-at-a-time semantics as the legacy path.
                SettableOptional<Result> anotherNodeHasWantedState = new SettableOptional<>();
                groupVisiting.visit(group -> {
                    if (!groupContainsNode(group, node)) {
                        Result result = otherNodeInGroupHasWantedState(group);
                        if (!result.settingWantedStateIsAllowed()) {
                            anotherNodeHasWantedState.set(result);
                            return false;
                        }
                    }
                    return true;
                });
                if (anotherNodeHasWantedState.isPresent()) {
                    log.log(FINE, "anotherNodeHasWantedState: " + anotherNodeHasWantedState.get());
                    return Optional.of(anotherNodeHasWantedState.get());
                }
                if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription)) {
                    log.log(FINE, "anotherNodeInGroupAlreadyAllowed, allow");
                    return Optional.of(allowSettingOfWantedState());
                }
            } else {
                Set<Integer> groupsWithStorageNodesWantedStateNotUp = groupsWithStorageNodesWantedStateNotUp();
                String disallowMessage = "At most nodes in " + maxNumberOfGroupsAllowedToBeDown +
                        " groups can have wanted state";
                if (groupsWithStorageNodesWantedStateNotUp.size() < maxNumberOfGroupsAllowedToBeDown)
                    return Optional.of(allowSettingOfWantedState());
                if (groupsWithStorageNodesWantedStateNotUp.size() > maxNumberOfGroupsAllowedToBeDown)
                    return Optional.of(createDisallowed(disallowMessage));
                // Exactly at the limit: allow only if this node's group is already among them.
                if (aGroupContainsNode(groupsWithStorageNodesWantedStateNotUp, node))
                    return Optional.of(allowSettingOfWantedState());
                return Optional.of(createDisallowed(disallowMessage));
            }
        } else {
            var otherNodeHasWantedState = otherNodeHasWantedState(nodeInfo);
            if ( ! otherNodeHasWantedState.settingWantedStateIsAllowed())
                return Optional.of(otherNodeHasWantedState);
        }
        return Optional.empty();
    }

    /** Returns a disallow-result, if there is a node in the group with wanted state != UP. */
    private Result otherNodeInGroupHasWantedState(Group group) {
        for (var configuredNode : group.getNodes()) {
            int index = configuredNode.index();
            StorageNodeInfo storageNodeInfo = clusterInfo.getStorageNodeInfo(index);
            if (storageNodeInfo == null) continue;  // no info for this index; skip

            State storageNodeWantedState = storageNodeInfo.getUserWantedState().getState();
            if (storageNodeWantedState != UP) {
                return createDisallowed(
                        "At most one group can have wanted state: Other storage node " + index +
                        " in group " + group.getIndex() + " has wanted state " + storageNodeWantedState);
            }

            State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState();
            if (distributorWantedState != UP) {
                return createDisallowed(
                        "At most one group can have wanted state: Other distributor " + index +
                        " in group " + group.getIndex() + " has wanted state " + distributorWantedState);
            }
        }
        return allowSettingOfWantedState();
    }

    /** Flat cluster: disallow if any other node (storage or distributor) has wanted state != UP. */
    private Result otherNodeHasWantedState(StorageNodeInfo nodeInfo) {
        for (var configuredNode : clusterInfo.getConfiguredNodes().values()) {
            int index = configuredNode.index();
            if (index == nodeInfo.getNodeIndex()) {
                continue;  // skip the node under evaluation
            }

            // NOTE(review): the messages below look garbled in this snapshot — "when index + "
            // reads like a broken concatenation (likely missing the index value between quotes);
            // verify against the upstream source.
            State storageNodeWantedState = clusterInfo.getStorageNodeInfo(index).getUserWantedState().getState();
            if (storageNodeWantedState != UP) {
                return createDisallowed(
                        "At most one node can have a wanted state when index + " has wanted state " + storageNodeWantedState);
            }

            State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState();
            if (distributorWantedState != UP) {
                return createDisallowed(
                        "At most one node can have a wanted state when index + " has wanted state " + distributorWantedState);
            }
        }
        return allowSettingOfWantedState();
    }

    /** Whether another node in this node's own group is already in MAINTENANCE with the same description. */
    private boolean anotherNodeInGroupAlreadyAllowed(StorageNodeInfo nodeInfo, String newDescription) {
        MutableBoolean alreadyAllowed = new MutableBoolean(false);

        groupVisiting.visit(group -> {
            if (!groupContainsNode(group, nodeInfo.getNode())) {
                return true;  // not this node's group; keep visiting
            }

            alreadyAllowed.set(anotherNodeInGroupAlreadyAllowed(group, nodeInfo.getNode(), newDescription));

            return false;  // found the node's group; stop visiting
        });

        return alreadyAllowed.get();
    }

    private boolean anotherNodeInGroupAlreadyAllowed(Group group, Node node, String newDescription) {
        return group.getNodes().stream()
                .filter(configuredNode -> configuredNode.index() != node.getIndex())
                .map(configuredNode -> clusterInfo.getStorageNodeInfo(configuredNode.index()))
                .filter(Objects::nonNull)  // skip indexes with no storage node info
                .map(NodeInfo::getUserWantedState)
                .anyMatch(userWantedState -> userWantedState.getState() == State.MAINTENANCE &&
                          Objects.equals(userWantedState.getDescription(), newDescription));
    }

    private static boolean groupContainsNode(Group group, Node node) {
        for (ConfiguredNode configuredNode : group.getNodes()) {
            if (configuredNode.index() == node.getIndex()) {
                return true;
            }
        }
        return false;
    }

    private boolean aGroupContainsNode(Collection<Integer> groupIndexes, Node node) {
        for (Group group : getGroupsWithIndexes(groupIndexes)) {
            if (groupContainsNode(group, node))
                return true;
        }
        return false;
    }

    private List<Group> getGroupsWithIndexes(Collection<Integer> groupIndexes) {
        return clusterInfo.getStorageNodeInfos().stream()
                .map(NodeInfo::getGroup)
                .filter(group -> groupIndexes.contains(group.getIndex()))
                .collect(Collectors.toList());
    }

    /** Requires all storage nodes and distributors to be UP or RETIRED, in both wanted and actual state. */
    private Result checkAllNodesAreUp(ClusterState clusterState) {
        for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) {
            State wantedState = storageNodeInfo.getUserWantedState().getState();
            if (wantedState != UP && wantedState != RETIRED) {
                return createDisallowed("Another storage node wants state " +
                        wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
            }

            State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
            if (state != UP && state != RETIRED) {
                return createDisallowed("Another storage node has state " +
                        state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
            }
        }

        for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) {
            State wantedState = distributorNodeInfo.getUserWantedState().getState();
            if (wantedState != UP && wantedState != RETIRED) {
                return createDisallowed("Another distributor wants state " +
                        wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex());
            }

            State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
            if (state != UP && state != RETIRED) {
                return createDisallowed("Another distributor has state " +
                        state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex());
            }
        }

        return allowSettingOfWantedState();
    }

    /** Checks this distributor's reported minimum replication factor for the node's buckets. */
    private Result checkStorageNodesForDistributor(DistributorNodeInfo distributorNodeInfo,
                                                   List<StorageNode> storageNodes, Node node) {
        for (StorageNode storageNode : storageNodes) {
            if (storageNode.getIndex() == node.getIndex()) {
                Integer minReplication = storageNode.getMinCurrentReplicationFactorOrNull();
                // A null replication factor is treated as acceptable here.
                if (minReplication != null && minReplication < requiredRedundancy) {
                    return createDisallowed("Distributor " + distributorNodeInfo.getNodeIndex() +
                            " says storage node " + node.getIndex() +
                            " has buckets with redundancy as low as " +
                            storageNode.getMinCurrentReplicationFactorOrNull() +
                            ", but we require at least " + requiredRedundancy);
                } else {
                    return allowSettingOfWantedState();
                }
            }
        }

        return allowSettingOfWantedState();
    }

    /**
     * We want to check with the distributors to verify that it is safe to take down the storage node.
     *
     * @param node the node to be checked
     * @param clusterStateVersion the cluster state we expect distributors to have
     */
    private Result checkDistributors(Node node, int clusterStateVersion) {
        if (clusterInfo.getDistributorNodeInfos().isEmpty()) {
            return createDisallowed("Not aware of any distributors, probably not safe to upgrade?");
        }

        for (DistributorNodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) {
            Integer distributorClusterStateVersion = distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull();
            if (distributorClusterStateVersion == null) {
                return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex() +
                        " has not reported any cluster state version yet.");
            } else if (distributorClusterStateVersion != clusterStateVersion) {
                return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex() +
                        " does not report same version (" +
                        distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull() +
                        ") as fleetcontroller (" + clusterStateVersion + ")");
            }

            List<StorageNode> storageNodes = distributorNodeInfo.getHostInfo().getDistributor().getStorageNodes();
            Result storageNodesResult = checkStorageNodesForDistributor(distributorNodeInfo, storageNodes, node);
            if (!storageNodesResult.settingWantedStateIsAllowed()) {
                return storageNodesResult;
            }
        }

        return allowSettingOfWantedState();
    }

    /**
     * Indexes of leaf groups with at least one storage node whose wanted state is not UP.
     * NOTE(review): this uses getWantedState() while the other checks use getUserWantedState();
     * confirm that the difference is intended.
     */
    private Set<Integer> groupsWithStorageNodesWantedStateNotUp() {
        return clusterInfo.getStorageNodeInfos().stream()
                .filter(sni -> !UP.equals(sni.getWantedState().getState()))
                .map(NodeInfo::getGroup)
                .filter(Objects::nonNull)
                .filter(Group::isLeafGroup)
                .map(Group::getIndex)
                .collect(Collectors.toSet());
    }

}
/**
 * Decides whether a requested wanted-state change for a content-cluster node may be carried
 * out, given the request condition (FORCE always allows, SAFE applies the checks below) and
 * the current cluster state. Safe mode only supports storage nodes and the target states
 * UP, MAINTENANCE and (for retired, bucket-free nodes) DOWN. When
 * maxNumberOfGroupsAllowedToBeDown is configured (not -1), up to that many groups may be
 * suspended concurrently.
 */
class NodeStateChangeChecker {

    private static final Logger log = Logger.getLogger(NodeStateChangeChecker.class.getName());
    // Metric consulted to verify a retired storage node manages zero buckets before it may
    // be set permanently DOWN.
    private static final String BUCKETS_METRIC_NAME = "vds.datastored.bucket_space.buckets_total";
    private static final Map<String, String> BUCKETS_METRIC_DIMENSIONS = Map.of("bucketSpace", "default");

    private final int requiredRedundancy;                  // minimum acceptable bucket replication factor
    private final HierarchicalGroupVisiting groupVisiting; // visits groups of a hierarchical distribution
    private final ClusterInfo clusterInfo;
    private final boolean inMoratorium;                    // true while the master controller is bootstrapping
    private final int maxNumberOfGroupsAllowedToBeDown;    // -1 selects the legacy one-node/group-at-a-time path

    /** Outcome of evaluating a state change: must set it, already in effect, or disallowed (with reason). */
    public static class Result {

        public enum Action {
            MUST_SET_WANTED_STATE,
            ALREADY_SET,
            DISALLOWED
        }

        private final Action action;
        private final String reason;

        private Result(Action action, String reason) {
            this.action = action;
            this.reason = reason;
        }

        public static Result createDisallowed(String reason) {
            return new Result(Action.DISALLOWED, reason);
        }

        public static Result allowSettingOfWantedState() {
            return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
        }

        public static Result createAlreadySet() {
            return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
        }

        public boolean settingWantedStateIsAllowed() {
            return action == Action.MUST_SET_WANTED_STATE;
        }

        public boolean wantedStateAlreadySet() {
            return action == Action.ALREADY_SET;
        }

        public String getReason() {
            return reason;
        }

        public String toString() {
            return "action " + action + ": " + reason;
        }
    }

    /**
     * Evaluates whether {@code node} may transition from {@code oldWantedState} to
     * {@code newWantedState}. FORCE short-circuits to allowed; SAFE dispatches to the
     * per-target-state checks; any other condition is rejected.
     */
    public Result evaluateTransition(
            Node node, ClusterState clusterState, SetUnitStateRequest.Condition condition,
            NodeState oldWantedState, NodeState newWantedState) {
        if (condition == FORCE) {
            return allowSettingOfWantedState();
        }

        if (inMoratorium) {
            return createDisallowed("Master cluster controller is bootstrapping and in moratorium");
        }

        if (condition != SAFE) {
            return createDisallowed("Condition not implemented: " + condition.name());
        }

        if (node.getType() != STORAGE) {
            return createDisallowed("Safe-set of node state is only supported for storage nodes! " +
                    "Requested node type: " + node.getType().toString());
        }

        StorageNodeInfo nodeInfo = clusterInfo.getStorageNodeInfo(node.getIndex());
        if (nodeInfo == null) {
            return createDisallowed("Unknown node " + node);
        }

        // Same state and same description as what is already wanted: nothing to change.
        if (newWantedState.getState().equals(oldWantedState.getState()) &&
            Objects.equals(newWantedState.getDescription(), oldWantedState.getDescription())) {
            return createAlreadySet();
        }

        return switch (newWantedState.getState()) {
            case UP -> canSetStateUp(nodeInfo, oldWantedState);
            case MAINTENANCE -> canSetStateMaintenanceTemporarily(nodeInfo, clusterState, newWantedState.getDescription());
            case DOWN -> canSetStateDownPermanently(nodeInfo, clusterState, newWantedState.getDescription());
            default -> createDisallowed("Destination node state unsupported in safe mode: " + newWantedState);
        };
    }

    /**
     * A node may be set permanently DOWN in safe mode only if: no conflicting wanted state is
     * set, the node reports UP (so its bucket metrics are available), it is RETIRED in the
     * cluster state, its host info matches the controller's cluster state version, and the
     * buckets metric shows it manages zero buckets.
     */
    private Result canSetStateDownPermanently(NodeInfo nodeInfo, ClusterState clusterState, String newDescription) {
        NodeState oldWantedState = nodeInfo.getUserWantedState();
        if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) {
            return createDisallowed("A conflicting wanted state is already set: " +
                    oldWantedState.getState() + ": " + oldWantedState.getDescription());
        }

        State reportedState = nodeInfo.getReportedState().getState();
        if (reportedState != UP) {
            return createDisallowed("Reported state (" + reportedState +
                    ") is not UP, so no bucket data is available");
        }

        State currentState = clusterState.getNodeState(nodeInfo.getNode()).getState();
        if (currentState != RETIRED) {
            return createDisallowed("Only retired nodes are allowed to be set to DOWN in safe mode - is " +
                    currentState);
        }

        HostInfo hostInfo = nodeInfo.getHostInfo();
        Integer hostInfoNodeVersion = hostInfo.getClusterStateVersionOrNull();
        int clusterControllerVersion = clusterState.getVersion();
        // Host info from an older cluster state version could carry stale metrics.
        if (hostInfoNodeVersion == null || hostInfoNodeVersion != clusterControllerVersion) {
            return createDisallowed("Cluster controller at version " + clusterControllerVersion +
                    " got info for storage node " + nodeInfo.getNodeIndex() +
                    " at a different version " + hostInfoNodeVersion);
        }

        Optional<Metrics.Value> bucketsMetric;
        bucketsMetric = hostInfo.getMetrics().getValueAt(BUCKETS_METRIC_NAME, BUCKETS_METRIC_DIMENSIONS);
        if (bucketsMetric.isEmpty() || bucketsMetric.get().getLast() == null) {
            return createDisallowed("Missing last value of the " + BUCKETS_METRIC_NAME +
                    " metric for storage node " + nodeInfo.getNodeIndex());
        }

        long lastBuckets = bucketsMetric.get().getLast();
        if (lastBuckets > 0) {
            return createDisallowed("The storage node manages " + lastBuckets + " buckets");
        }

        return allowSettingOfWantedState();
    }

    /** Allows setting wanted state UP only when the node also reports UP. */
    private Result canSetStateUp(NodeInfo nodeInfo, NodeState oldWantedState) {
        if (oldWantedState.getState() == UP) {
            return createAlreadySet();
        }

        if (nodeInfo.getReportedState().getState() != UP) {
            return createDisallowed("Refuse to set wanted state to UP, " +
                    "since the reported state is not UP (" +
                    nodeInfo.getReportedState().getState() + ")");
        }

        return allowSettingOfWantedState();
    }

    /**
     * Checks whether the node may be put in MAINTENANCE: no conflicting wanted state; then
     * the group-suspension limits (legacy one-at-a-time when the limit is -1, otherwise the
     * max-groups checks); a node DOWN in the cluster state is then allowed directly; finally
     * the all-nodes-up and distributor checks must pass.
     */
    private Result canSetStateMaintenanceTemporarily(StorageNodeInfo nodeInfo, ClusterState clusterState,
                                                     String newDescription) {
        NodeState oldWantedState = nodeInfo.getUserWantedState();
        if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) {
            return createDisallowed("A conflicting wanted state is already set: " +
                    oldWantedState.getState() + ": " + oldWantedState.getDescription());
        }

        if (maxNumberOfGroupsAllowedToBeDown == -1) {
            // Legacy path: at most one node (or one group, if hierarchical) suspended at a time.
            var otherGroupCheck = anotherNodeInAnotherGroupHasWantedState(nodeInfo);
            if (!otherGroupCheck.settingWantedStateIsAllowed()) {
                return otherGroupCheck;
            }
            if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription)) {
                return allowSettingOfWantedState();
            }
        } else {
            var result = otherNodesHaveWantedState(nodeInfo, newDescription, clusterState);
            if (result.isPresent())
                return result.get();
        }

        // A node that is already DOWN in the cluster state can safely be put in MAINTENANCE.
        if (clusterState.getNodeState(nodeInfo.getNode()).getState() == DOWN) {
            log.log(FINE, "node is DOWN, allow");
            return allowSettingOfWantedState();
        }

        Result allNodesAreUpCheck = checkAllNodesAreUp(clusterState);
        if (!allNodesAreUpCheck.settingWantedStateIsAllowed()) {
            log.log(FINE, "allNodesAreUpCheck: " + allNodesAreUpCheck);
            return allNodesAreUpCheck;
        }

        Result checkDistributorsResult = checkDistributors(nodeInfo.getNode(), clusterState.getVersion());
        if (!checkDistributorsResult.settingWantedStateIsAllowed()) {
            log.log(FINE, "checkDistributors: "+ checkDistributorsResult);
            return checkDistributorsResult;
        }

        return allowSettingOfWantedState();
    }

    /**
     * Returns a disallow-result if there is another node (in another group, if hierarchical)
     * that has a wanted state != UP. We disallow more than 1 suspended node/group at a time.
     */
    private Result anotherNodeInAnotherGroupHasWantedState(StorageNodeInfo nodeInfo) {
        if (groupVisiting.isHierarchical()) {
            SettableOptional<Result> anotherNodeHasWantedState = new SettableOptional<>();
            groupVisiting.visit(group -> {
                if (!groupContainsNode(group, nodeInfo.getNode())) {
                    Result result = otherNodeInGroupHasWantedState(group);
                    if (!result.settingWantedStateIsAllowed()) {
                        anotherNodeHasWantedState.set(result);
                        return false;  // found a conflict; stop visiting
                    }
                }
                return true;  // keep visiting
            });
            return anotherNodeHasWantedState.asOptional().orElseGet(Result::allowSettingOfWantedState);
        } else {
            // Flat cluster: check every other configured node.
            return otherNodeHasWantedState(nodeInfo);
        }
    }

    /**
     * Returns an optional Result, where return value is:
     * For flat setup: Return Optional.of(disallowed) if wanted state is set on some node, else Optional.empty
     * For hierarchical setup: No wanted state for other nodes, return Optional.empty
     *   Wanted state for nodes/groups are not UP:
     *     if less than maxNumberOfGroupsAllowedToBeDown: return Optional.of(allowed)
     *     else: if node is in group with nodes already down: return Optional.of(allowed), else Optional.of(disallowed)
     */
    private Optional<Result> otherNodesHaveWantedState(StorageNodeInfo nodeInfo,
                                                       String newDescription, ClusterState clusterState) {
        Node node = nodeInfo.getNode();
        if (groupVisiting.isHierarchical()) {
            Set<Integer> groupsWithNodesWantedStateNotUp = groupsWithUserWantedStateNotUp();
            if (groupsWithNodesWantedStateNotUp.size() == 0) {
                log.log(FINE, "groupsWithNodesWantedStateNotUp=0");
                return Optional.empty();
            }

            // The node's own group already suspended for the same reason: always allowed.
            Set<Integer> groupsWithSameStateAndDescription = groupsWithSameStateAndDescription(MAINTENANCE, newDescription);
            if (aGroupContainsNode(groupsWithSameStateAndDescription, node)) {
                log.log(FINE, "Node is in group with same state and description, allow");
                return Optional.of(allowSettingOfWantedState());
            }

            // Some other node has a wanted state with a different state/description.
            if (groupsWithSameStateAndDescription.size() == 0) {
                return Optional.of(createDisallowed("Wanted state already set for another node in groups: " +
                        sortSetIntoList(groupsWithNodesWantedStateNotUp)));
            }

            Set<Integer> retiredAndNotUpGroups = groupsWithNotRetiredAndNotUp(clusterState);
            int numberOfGroupsToConsider = retiredAndNotUpGroups.size();
            // Subtract this node's own group from the count of already-affected groups.
            if (aGroupContainsNode(retiredAndNotUpGroups, node)) {
                numberOfGroupsToConsider = retiredAndNotUpGroups.size() - 1;
            }
            if (numberOfGroupsToConsider < maxNumberOfGroupsAllowedToBeDown) {
                log.log(FINE, "Allow, retiredAndNotUpGroups=" + retiredAndNotUpGroups);
                return Optional.of(allowSettingOfWantedState());
            }

            return Optional.of(createDisallowed(String.format("At most %d groups can have wanted state: %s",
                    maxNumberOfGroupsAllowedToBeDown,
                    sortSetIntoList(retiredAndNotUpGroups))));
        } else {
            var otherNodeHasWantedState = otherNodeHasWantedState(nodeInfo);
            if ( ! otherNodeHasWantedState.settingWantedStateIsAllowed())
                return Optional.of(otherNodeHasWantedState);
        }
        return Optional.empty();
    }

    /** Returns the set as a sorted list, for stable output in messages. */
    private ArrayList<Integer> sortSetIntoList(Set<Integer> set) {
        var sortedList = new ArrayList<>(set);
        Collections.sort(sortedList);
        return sortedList;
    }

    /** Returns a disallow-result, if there is a node in the group with wanted state != UP. */
    private Result otherNodeInGroupHasWantedState(Group group) {
        for (var configuredNode : group.getNodes()) {
            int index = configuredNode.index();
            StorageNodeInfo storageNodeInfo = clusterInfo.getStorageNodeInfo(index);
            if (storageNodeInfo == null) continue;  // no info for this index; skip

            State storageNodeWantedState = storageNodeInfo.getUserWantedState().getState();
            if (storageNodeWantedState != UP) {
                return createDisallowed(
                        "At most one group can have wanted state: Other storage node " + index +
                        " in group " + group.getIndex() + " has wanted state " + storageNodeWantedState);
            }

            State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState();
            if (distributorWantedState != UP) {
                return createDisallowed(
                        "At most one group can have wanted state: Other distributor " + index +
                        " in group " + group.getIndex() + " has wanted state " + distributorWantedState);
            }
        }
        return allowSettingOfWantedState();
    }

    /** Flat cluster: disallow if any other node (storage or distributor) has wanted state != UP. */
    private Result otherNodeHasWantedState(StorageNodeInfo nodeInfo) {
        for (var configuredNode : clusterInfo.getConfiguredNodes().values()) {
            int index = configuredNode.index();
            if (index == nodeInfo.getNodeIndex()) {
                continue;  // skip the node under evaluation
            }

            // NOTE(review): the messages below look garbled in this snapshot — "when index + "
            // reads like a broken concatenation (likely missing the index value between quotes);
            // verify against the upstream source.
            State storageNodeWantedState = clusterInfo.getStorageNodeInfo(index).getUserWantedState().getState();
            if (storageNodeWantedState != UP) {
                return createDisallowed(
                        "At most one node can have a wanted state when index + " has wanted state " + storageNodeWantedState);
            }

            State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState();
            if (distributorWantedState != UP) {
                return createDisallowed(
                        "At most one node can have a wanted state when index + " has wanted state " + distributorWantedState);
            }
        }
        return allowSettingOfWantedState();
    }

    /** Whether another node in this node's own group is already in MAINTENANCE with the same description. */
    private boolean anotherNodeInGroupAlreadyAllowed(StorageNodeInfo nodeInfo, String newDescription) {
        MutableBoolean alreadyAllowed = new MutableBoolean(false);

        groupVisiting.visit(group -> {
            if (!groupContainsNode(group, nodeInfo.getNode())) {
                return true;  // not this node's group; keep visiting
            }

            alreadyAllowed.set(anotherNodeInGroupAlreadyAllowed(group, nodeInfo.getNode(), newDescription));

            return false;  // found the node's group; stop visiting
        });

        return alreadyAllowed.get();
    }

    private boolean anotherNodeInGroupAlreadyAllowed(Group group, Node node, String newDescription) {
        return group.getNodes().stream()
                .filter(configuredNode -> configuredNode.index() != node.getIndex())
                .map(configuredNode -> clusterInfo.getStorageNodeInfo(configuredNode.index()))
                .filter(Objects::nonNull)  // skip indexes with no storage node info
                .map(NodeInfo::getUserWantedState)
                .anyMatch(userWantedState -> userWantedState.getState() == State.MAINTENANCE &&
                          Objects.equals(userWantedState.getDescription(), newDescription));
    }

    private static boolean groupContainsNode(Group group, Node node) {
        for (ConfiguredNode configuredNode : group.getNodes()) {
            if (configuredNode.index() == node.getIndex()) {
                return true;
            }
        }
        return false;
    }

    private boolean aGroupContainsNode(Collection<Integer> groupIndexes, Node node) {
        for (Group group : getGroupsWithIndexes(groupIndexes)) {
            if (groupContainsNode(group, node))
                return true;
        }
        return false;
    }

    private List<Group> getGroupsWithIndexes(Collection<Integer> groupIndexes) {
        return clusterInfo.getStorageNodeInfos().stream()
                .map(NodeInfo::getGroup)
                .filter(group -> groupIndexes.contains(group.getIndex()))
                .collect(Collectors.toList());
    }

    /** Requires all storage nodes and distributors to be UP or RETIRED, in both wanted and actual state. */
    private Result checkAllNodesAreUp(ClusterState clusterState) {
        for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) {
            State wantedState = storageNodeInfo.getUserWantedState().getState();
            if (wantedState != UP && wantedState != RETIRED) {
                return createDisallowed("Another storage node wants state " +
                        wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
            }

            State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
            if (state != UP && state != RETIRED) {
                return createDisallowed("Another storage node has state " +
                        state.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
            }
        }

        for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) {
            State wantedState = distributorNodeInfo.getUserWantedState().getState();
            if (wantedState != UP && wantedState != RETIRED) {
                return createDisallowed("Another distributor wants state " +
                        wantedState.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex());
            }

            State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
            if (state != UP && state != RETIRED) {
                return createDisallowed("Another distributor has state " +
                        state.toString().toUpperCase() + ": " + distributorNodeInfo.getNodeIndex());
            }
        }

        return allowSettingOfWantedState();
    }

    /** Checks this distributor's reported minimum replication factor for the node's buckets. */
    private Result checkStorageNodesForDistributor(DistributorNodeInfo distributorNodeInfo,
                                                   List<StorageNode> storageNodes, Node node) {
        for (StorageNode storageNode : storageNodes) {
            if (storageNode.getIndex() == node.getIndex()) {
                Integer minReplication = storageNode.getMinCurrentReplicationFactorOrNull();
                // A null replication factor is treated as acceptable here.
                if (minReplication != null && minReplication < requiredRedundancy) {
                    return createDisallowed("Distributor " + distributorNodeInfo.getNodeIndex() +
                            " says storage node " + node.getIndex() +
                            " has buckets with redundancy as low as " +
                            storageNode.getMinCurrentReplicationFactorOrNull() +
                            ", but we require at least " + requiredRedundancy);
                } else {
                    return allowSettingOfWantedState();
                }
            }
        }

        return allowSettingOfWantedState();
    }

    /**
     * We want to check with the distributors to verify that it is safe to take down the storage node.
     *
     * @param node the node to be checked
     * @param clusterStateVersion the cluster state we expect distributors to have
     */
    private Result checkDistributors(Node node, int clusterStateVersion) {
        if (clusterInfo.getDistributorNodeInfos().isEmpty()) {
            return createDisallowed("Not aware of any distributors, probably not safe to upgrade?");
        }

        for (DistributorNodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) {
            Integer distributorClusterStateVersion = distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull();
            if (distributorClusterStateVersion == null) {
                return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex() +
                        " has not reported any cluster state version yet.");
            } else if (distributorClusterStateVersion != clusterStateVersion) {
                return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex() +
                        " does not report same version (" +
                        distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull() +
                        ") as fleetcontroller (" + clusterStateVersion + ")");
            }

            List<StorageNode> storageNodes = distributorNodeInfo.getHostInfo().getDistributor().getStorageNodes();
            Result storageNodesResult = checkStorageNodesForDistributor(distributorNodeInfo, storageNodes, node);
            if (!storageNodesResult.settingWantedStateIsAllowed()) {
                return storageNodesResult;
            }
        }

        return allowSettingOfWantedState();
    }

    /** Indexes of leaf groups with at least one node (storage or distributor) whose user wanted state is not UP. */
    private Set<Integer> groupsWithUserWantedStateNotUp() {
        return clusterInfo.getAllNodeInfos().stream()
                .filter(sni -> !UP.equals(sni.getUserWantedState().getState()))
                .map(NodeInfo::getGroup)
                .filter(Objects::nonNull)
                .filter(Group::isLeafGroup)
                .map(Group::getIndex)
                .collect(Collectors.toSet());
    }

    /** Indexes of leaf groups with a node whose user wanted state matches the given state and description. */
    private Set<Integer> groupsWithSameStateAndDescription(State state, String newDescription) {
        return clusterInfo.getAllNodeInfos().stream()
                .filter(nodeInfo -> {
                    var userWantedState = nodeInfo.getUserWantedState();
                    return userWantedState.getState() == state &&
                           Objects.equals(userWantedState.getDescription(), newDescription);
                })
                .map(NodeInfo::getGroup)
                .filter(Objects::nonNull)
                .filter(Group::isLeafGroup)
                .map(Group::getIndex)
                .collect(Collectors.toSet());
    }

    /** Indexes of leaf groups with a node neither RETIRED nor UP, in either wanted or actual cluster state. */
    private Set<Integer> groupsWithNotRetiredAndNotUp(ClusterState clusterState) {
        return clusterInfo.getAllNodeInfos().stream()
                .filter(nodeInfo -> (nodeInfo.getUserWantedState().getState() != RETIRED
                                         && nodeInfo.getUserWantedState().getState() != UP)
                                 || (clusterState.getNodeState(nodeInfo.getNode()).getState() != RETIRED
                                         && clusterState.getNodeState(nodeInfo.getNode()).getState() != UP))
                .map(NodeInfo::getGroup)
                .filter(Objects::nonNull)
                .filter(Group::isLeafGroup)
                .map(Group::getIndex)
                .collect(Collectors.toSet());
    }

}
typo nit ```suggestion // Subtract one group if node is in a group with nodes already retired or not up, since number of such groups will ```
private Optional<Result> otherNodesHaveWantedState(StorageNodeInfo nodeInfo, String newDescription, ClusterState clusterState) { Node node = nodeInfo.getNode(); if (groupVisiting.isHierarchical()) { Set<Integer> groupsWithNodesWantedStateNotUp = groupsWithUserWantedStateNotUp(); if (groupsWithNodesWantedStateNotUp.size() == 0) { log.log(FINE, "groupsWithNodesWantedStateNotUp=0"); return Optional.empty(); } Set<Integer> groupsWithSameStateAndDescription = groupsWithSameStateAndDescription(MAINTENANCE, newDescription); if (aGroupContainsNode(groupsWithSameStateAndDescription, node)) { log.log(FINE, "Node is in group with same state and description, allow"); return Optional.of(allowSettingOfWantedState()); } if (groupsWithSameStateAndDescription.size() == 0) { return Optional.of(createDisallowed("Wanted state already set for another node in groups: " + sortSetIntoList(groupsWithNodesWantedStateNotUp))); } Set<Integer> retiredAndNotUpGroups = groupsWithNotRetiredAndNotUp(clusterState); int numberOfGroupsToConsider = retiredAndNotUpGroups.size(); if (aGroupContainsNode(retiredAndNotUpGroups, node)) { numberOfGroupsToConsider = retiredAndNotUpGroups.size() - 1; } if (numberOfGroupsToConsider < maxNumberOfGroupsAllowedToBeDown) { log.log(FINE, "Allow, retiredAndNotUpGroups=" + retiredAndNotUpGroups); return Optional.of(allowSettingOfWantedState()); } return Optional.of(createDisallowed(String.format("At most %d groups can have wanted state: %s", maxNumberOfGroupsAllowedToBeDown, sortSetIntoList(retiredAndNotUpGroups)))); } else { var otherNodeHasWantedState = otherNodeHasWantedState(nodeInfo); if ( ! otherNodeHasWantedState.settingWantedStateIsAllowed()) return Optional.of(otherNodeHasWantedState); } return Optional.empty(); }
private Optional<Result> otherNodesHaveWantedState(StorageNodeInfo nodeInfo, String newDescription, ClusterState clusterState) { Node node = nodeInfo.getNode(); if (groupVisiting.isHierarchical()) { Set<Integer> groupsWithNodesWantedStateNotUp = groupsWithUserWantedStateNotUp(); if (groupsWithNodesWantedStateNotUp.size() == 0) { log.log(FINE, "groupsWithNodesWantedStateNotUp=0"); return Optional.empty(); } Set<Integer> groupsWithSameStateAndDescription = groupsWithSameStateAndDescription(MAINTENANCE, newDescription); if (aGroupContainsNode(groupsWithSameStateAndDescription, node)) { log.log(FINE, "Node is in group with same state and description, allow"); return Optional.of(allowSettingOfWantedState()); } if (groupsWithSameStateAndDescription.size() == 0) { return Optional.of(createDisallowed("Wanted state already set for another node in groups: " + sortSetIntoList(groupsWithNodesWantedStateNotUp))); } Set<Integer> retiredAndNotUpGroups = groupsWithNotRetiredAndNotUp(clusterState); int numberOfGroupsToConsider = retiredAndNotUpGroups.size(); if (aGroupContainsNode(retiredAndNotUpGroups, node)) { numberOfGroupsToConsider = retiredAndNotUpGroups.size() - 1; } if (numberOfGroupsToConsider < maxNumberOfGroupsAllowedToBeDown) { log.log(FINE, "Allow, retiredAndNotUpGroups=" + retiredAndNotUpGroups); return Optional.of(allowSettingOfWantedState()); } return Optional.of(createDisallowed(String.format("At most %d groups can have wanted state: %s", maxNumberOfGroupsAllowedToBeDown, sortSetIntoList(retiredAndNotUpGroups)))); } else { var otherNodeHasWantedState = otherNodeHasWantedState(nodeInfo); if ( ! otherNodeHasWantedState.settingWantedStateIsAllowed()) return Optional.of(otherNodeHasWantedState); } return Optional.empty(); }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
In a valid configuration nodes should always be configured as part of leaf groups, so I think this is always true. But it doesn't hurt to be explicit about the expectations of the node.
private Set<Integer> groupsWithUserWantedStateNotUp() { return clusterInfo.getAllNodeInfos().stream() .filter(sni -> !UP.equals(sni.getUserWantedState().getState())) .map(NodeInfo::getGroup) .filter(Objects::nonNull) .filter(Group::isLeafGroup) .map(Group::getIndex) .collect(Collectors.toSet()); }
.filter(Group::isLeafGroup)
private Set<Integer> groupsWithUserWantedStateNotUp() { return clusterInfo.getAllNodeInfos().stream() .filter(sni -> !UP.equals(sni.getUserWantedState().getState())) .map(NodeInfo::getGroup) .filter(Objects::nonNull) .filter(Group::isLeafGroup) .map(Group::getIndex) .collect(Collectors.toSet()); }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISALLOWED, reason); } public static Result allowSettingOfWantedState() { return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different"); } public static Result createAlreadySet() { return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective"); } public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; } public boolean wantedStateAlreadySet() { return action == Action.ALREADY_SET; } public String getReason() { return reason; } public String toString() { return "action " + action + ": " + reason; } }
This was always true
public void testVersionConfidence() { DeploymentTester tester = new DeploymentTester().atMondayMorning(); Version version0 = new Version("6.2"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().maintain(); ApplicationPackage canaryPolicy = applicationPackage("canary", 7); var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default") .submit(canaryPolicy) .deploy(); var canary1 = tester.newDeploymentContext("tenant1", "canary1", "default") .submit(canaryPolicy) .deploy(); var canary2 = tester.newDeploymentContext("tenant1", "canary2", "default") .submit(canaryPolicy) .deploy(); ApplicationPackage defaultPolicy = applicationPackage("default"); var default0 = tester.newDeploymentContext("tenant1", "default0", "default") .submit(applicationPackage("default", 7)) .deploy(); var default1 = tester.newDeploymentContext("tenant1", "default1", "default") .submit(defaultPolicy) .deploy(); var default2 = tester.newDeploymentContext("tenant1", "default2", "default") .submit(defaultPolicy) .deploy(); var default3 = tester.newDeploymentContext("tenant1", "default3", "default") .submit(defaultPolicy) .deploy(); var default4 = tester.newDeploymentContext("tenant1", "default4", "default") .submit(defaultPolicy) .deploy(); var default5 = tester.newDeploymentContext("tenant1", "default5", "default") .submit(defaultPolicy) .deploy(); var default6 = tester.newDeploymentContext("tenant1", "default6", "default") .submit(defaultPolicy) .deploy(); var default7 = tester.newDeploymentContext("tenant1", "default7", "default") .submit(defaultPolicy) .deploy(); var default8 = tester.newDeploymentContext("tenant1", "default8", "default") .submit(defaultPolicy) .deploy(); var default9 = tester.newDeploymentContext("tenant1", "default9", "default") .submit(defaultPolicy) .deploy(); ApplicationPackage conservativePolicy = applicationPackage("conservative"); var conservative0 = tester.newDeploymentContext("tenant1", "conservative0", "default") 
.submit(conservativePolicy) .deploy(); var devApp = tester.newDeploymentContext("dev", "app", "on-version-1"); var ignored0 = tester.newDeploymentContext("tenant1", "ignored0", "default"); assertEquals(Confidence.high, confidence(tester.controller(), version0), "All applications running on this version: High"); Version version1 = new Version("6.3"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); tester.triggerJobs(); devApp.runJob(JobType.dev("us-east-1"), canaryApplicationPackage); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.low, confidence(tester.controller(), version1), "Just the dev app: Low"); canary0.deployPlatform(version1); canary1.runJob(systemTest) .runJob(stagingTest) .failDeployment(productionUsWest1); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.broken, confidence(tester.controller(), version1), "One canary failed: Broken"); Version version2 = new Version("6.4"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); tester.triggerJobs(); assertEquals(Confidence.low, confidence(tester.controller(), version2), "Confidence defaults to low for version with no applications"); canary0.deployPlatform(version2); canary1.deployPlatform(version2); assertEquals(Confidence.broken, confidence(tester.controller(), version1), "Confidence remains unchanged for version1: Broken"); assertEquals(Confidence.low, confidence(tester.controller(), version2), "Nothing has failed but not all canaries have upgraded: Low"); canary2.triggerJobs().jobAborted(systemTest).jobAborted(stagingTest); canary2.runJob(stagingTest); canary2.deployPlatform(version2); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); tester.triggerJobs(); assertEquals(Confidence.normal, confidence(tester.controller(), version2), "Canaries have upgraded: Normal"); default0.deployPlatform(version2); default1.deployPlatform(version2); 
default2.deployPlatform(version2); default3.deployPlatform(version2); default4.deployPlatform(version2); default5.deployPlatform(version2); default6.deployPlatform(version2); default7.deployPlatform(version2); tester.controllerTester().computeVersionStatus(); tester.controllerTester().createNewController(); assertEquals(Confidence.high, confidence(tester.controller(), version0), "Confidence remains unchanged for version0: High"); assertEquals(Confidence.normal, confidence(tester.controller(), version2), "All canaries deployed + < 90% of defaults: Normal"); assertTrue(tester.controller().readVersionStatus().versions().stream() .noneMatch(vespaVersion -> vespaVersion.versionNumber().equals(version1)), "Status for version without applications is removed"); default8.deployPlatform(version2); default9.deployPlatform(version2); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version0), "Confidence remains unchanged for version0: High"); assertEquals(VespaVersion.Confidence.high, confidence(tester.controller(), version2), "90% of defaults deployed successfully: High"); canary0.submit(canaryPolicy).failDeployment(systemTest); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version0), "Confidence remains unchanged for version0: High"); assertEquals(VespaVersion.Confidence.high, confidence(tester.controller(), version2), "90% of defaults deployed successfully: High"); canary0.deploy(); Version version3 = new Version("6.5"); tester.controllerTester().upgradeSystem(version3); tester.upgrader().maintain(); tester.triggerJobs(); canary0.deployPlatform(version3); canary1.deployPlatform(version3); canary2.deployPlatform(version3); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); tester.triggerJobs(); default0.failDeployment(stagingTest); default1.failDeployment(stagingTest); default2.failDeployment(stagingTest); 
default3.failDeployment(stagingTest); default4.failDeployment(stagingTest); default5.failDeployment(stagingTest); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version0), "Confidence remains unchanged for version0: High"); assertEquals(Confidence.high, confidence(tester.controller(), version2), "Confidence remains unchanged for version2: High"); assertEquals(VespaVersion.Confidence.broken, confidence(tester.controller(), version3), "60% of defaults failed: Broken"); List<VespaVersion> versions = tester.controller().readVersionStatus().versions(); assertEquals(List.of("6.2", "6.4", "6.5"), versions.stream().map(version -> version.versionNumber().toString()).toList()); assertTrue(versions.get(0).isReleased()); assertFalse(versions.get(1).isReleased()); assertTrue(versions.get(2).isReleased()); Version version4 = new Version("7.1"); tester.controllerTester().upgradeSystem(version4); tester.upgrader().maintain(); tester.triggerJobs(); canary0.deployPlatform(version4); canary1.deployPlatform(version4); canary2.deployPlatform(version4); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.normal, confidence(tester.controller(), version4)); tester.upgrader().maintain(); tester.triggerJobs(); assertEquals(Change.of(version4), default0.instance().change()); default0.jobAborted(systemTest) .jobAborted(stagingTest) .deployPlatform(version4); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version4)); }
public void testVersionConfidence() { DeploymentTester tester = new DeploymentTester().atMondayMorning(); Version version0 = new Version("6.2"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().maintain(); ApplicationPackage canaryPolicy = applicationPackage("canary", 7); var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default") .submit(canaryPolicy) .deploy(); var canary1 = tester.newDeploymentContext("tenant1", "canary1", "default") .submit(canaryPolicy) .deploy(); var canary2 = tester.newDeploymentContext("tenant1", "canary2", "default") .submit(canaryPolicy) .deploy(); ApplicationPackage defaultPolicy = applicationPackage("default"); var default0 = tester.newDeploymentContext("tenant1", "default0", "default") .submit(applicationPackage("default", 7)) .deploy(); var default1 = tester.newDeploymentContext("tenant1", "default1", "default") .submit(defaultPolicy) .deploy(); var default2 = tester.newDeploymentContext("tenant1", "default2", "default") .submit(defaultPolicy) .deploy(); var default3 = tester.newDeploymentContext("tenant1", "default3", "default") .submit(defaultPolicy) .deploy(); var default4 = tester.newDeploymentContext("tenant1", "default4", "default") .submit(defaultPolicy) .deploy(); var default5 = tester.newDeploymentContext("tenant1", "default5", "default") .submit(defaultPolicy) .deploy(); var default6 = tester.newDeploymentContext("tenant1", "default6", "default") .submit(defaultPolicy) .deploy(); var default7 = tester.newDeploymentContext("tenant1", "default7", "default") .submit(defaultPolicy) .deploy(); var default8 = tester.newDeploymentContext("tenant1", "default8", "default") .submit(defaultPolicy) .deploy(); var default9 = tester.newDeploymentContext("tenant1", "default9", "default") .submit(defaultPolicy) .deploy(); ApplicationPackage conservativePolicy = applicationPackage("conservative"); var conservative0 = tester.newDeploymentContext("tenant1", "conservative0", "default") 
.submit(conservativePolicy) .deploy(); var devApp = tester.newDeploymentContext("dev", "app", "on-version-1"); var ignored0 = tester.newDeploymentContext("tenant1", "ignored0", "default"); assertEquals(Confidence.high, confidence(tester.controller(), version0), "All applications running on this version: High"); Version version1 = new Version("6.3"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); tester.triggerJobs(); devApp.runJob(JobType.dev("us-east-1"), canaryApplicationPackage); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.low, confidence(tester.controller(), version1), "Just the dev app: Low"); canary0.deployPlatform(version1); canary1.runJob(systemTest) .runJob(stagingTest) .failDeployment(productionUsWest1); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.broken, confidence(tester.controller(), version1), "One canary failed: Broken"); Version version2 = new Version("6.4"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); tester.triggerJobs(); assertEquals(Confidence.low, confidence(tester.controller(), version2), "Confidence defaults to low for version with no applications"); canary0.deployPlatform(version2); canary1.deployPlatform(version2); assertEquals(Confidence.broken, confidence(tester.controller(), version1), "Confidence remains unchanged for version1: Broken"); assertEquals(Confidence.low, confidence(tester.controller(), version2), "Nothing has failed but not all canaries have upgraded: Low"); canary2.triggerJobs().jobAborted(systemTest).jobAborted(stagingTest); canary2.runJob(stagingTest); canary2.deployPlatform(version2); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); tester.triggerJobs(); assertEquals(Confidence.normal, confidence(tester.controller(), version2), "Canaries have upgraded: Normal"); default0.deployPlatform(version2); default1.deployPlatform(version2); 
default2.deployPlatform(version2); default3.deployPlatform(version2); default4.deployPlatform(version2); default5.deployPlatform(version2); default6.deployPlatform(version2); default7.deployPlatform(version2); tester.controllerTester().computeVersionStatus(); tester.controllerTester().createNewController(); assertEquals(Confidence.high, confidence(tester.controller(), version0), "Confidence remains unchanged for version0: High"); assertEquals(Confidence.normal, confidence(tester.controller(), version2), "All canaries deployed + < 90% of defaults: Normal"); assertTrue(tester.controller().readVersionStatus().versions().stream() .noneMatch(vespaVersion -> vespaVersion.versionNumber().equals(version1)), "Status for version without applications is removed"); default8.deployPlatform(version2); default9.deployPlatform(version2); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version0), "Confidence remains unchanged for version0: High"); assertEquals(VespaVersion.Confidence.high, confidence(tester.controller(), version2), "90% of defaults deployed successfully: High"); canary0.submit(canaryPolicy).failDeployment(systemTest); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version0), "Confidence remains unchanged for version0: High"); assertEquals(VespaVersion.Confidence.high, confidence(tester.controller(), version2), "90% of defaults deployed successfully: High"); canary0.deploy(); Version version3 = new Version("6.5"); tester.controllerTester().upgradeSystem(version3); tester.upgrader().maintain(); tester.triggerJobs(); canary0.deployPlatform(version3); canary1.deployPlatform(version3); canary2.deployPlatform(version3); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); tester.triggerJobs(); default0.failDeployment(stagingTest); default1.failDeployment(stagingTest); default2.failDeployment(stagingTest); 
default3.failDeployment(stagingTest); default4.failDeployment(stagingTest); default5.failDeployment(stagingTest); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version0), "Confidence remains unchanged for version0: High"); assertEquals(Confidence.high, confidence(tester.controller(), version2), "Confidence remains unchanged for version2: High"); assertEquals(VespaVersion.Confidence.broken, confidence(tester.controller(), version3), "60% of defaults failed: Broken"); List<VespaVersion> versions = tester.controller().readVersionStatus().versions(); assertEquals(List.of("6.2", "6.4", "6.5"), versions.stream().map(version -> version.versionNumber().toString()).toList()); assertTrue(versions.get(0).isReleased()); assertFalse(versions.get(1).isReleased()); assertTrue(versions.get(2).isReleased()); Version version4 = new Version("7.1"); tester.controllerTester().upgradeSystem(version4); tester.upgrader().maintain(); tester.triggerJobs(); canary0.deployPlatform(version4); canary1.deployPlatform(version4); canary2.deployPlatform(version4); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.normal, confidence(tester.controller(), version4)); tester.upgrader().maintain(); tester.triggerJobs(); assertEquals(Change.of(version4), default0.instance().change()); default0.jobAborted(systemTest) .jobAborted(stagingTest) .deployPlatform(version4); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version4)); }
/**
 * Tests computation of {@code VersionStatus}: which versions exist in the system, which
 * controller/system version is reported, and how per-version confidence is derived from
 * deployment outcomes, overrides, and time windows.
 */
class VersionStatusTest {

    @Test
    public void testEmptyVersionStatus() {
        VersionStatus status = VersionStatus.empty();
        assertFalse(status.systemVersion().isPresent());
        assertTrue(status.versions().isEmpty());
    }

    @Test
    public void testSystemVersionIsControllerVersionIfConfigServersAreNewer() {
        ControllerTester tester = new ControllerTester();
        Version controllerVersion = tester.controller().readVersionStatus().controllerVersion().get().versionNumber();
        // Push system applications past the controller; the reported system version must not exceed the controller's.
        Version largerThanCurrent = new Version(controllerVersion.getMajor() + 1);
        tester.upgradeSystemApplications(largerThanCurrent);
        VersionStatus versionStatus = VersionStatus.compute(tester.controller());
        assertEquals(controllerVersion, versionStatus.systemVersion().get().versionNumber());
    }

    @Test
    public void testSystemVersionIsVersionOfOldestConfigServer() {
        ControllerTester tester = new ControllerTester();
        Version version0 = Version.fromString("6.1");
        Version version1 = Version.fromString("6.5");
        // Upgrade exactly one config server per zone; the rest stay on the older version.
        for (ZoneApi zone : tester.zoneRegistry().zones().all().zones()) {
            for (Node node : tester.configServer().nodeRepository().list(zone.getId(), NodeFilter.all().applications(SystemApplication.configServer.id()))) {
                Node upgradedNode = Node.builder(node).currentVersion(version1).build();
                tester.configServer().nodeRepository().putNodes(zone.getId(), upgradedNode);
                break;
            }
        }
        VersionStatus versionStatus = VersionStatus.compute(tester.controller());
        assertEquals(version0, versionStatus.systemVersion().get().versionNumber());
    }

    @Test
    public void testControllerVersion() {
        HostName controller1 = HostName.of("controller-1");
        HostName controller2 = HostName.of("controller-2");
        HostName controller3 = HostName.of("controller-3");
        MockCuratorDb db = new MockCuratorDb(Stream.of(controller1, controller2, controller3)
                                                   .map(hostName -> hostName.value() + ":2222")
                                                   .collect(Collectors.joining(",")));
        ControllerTester tester = new ControllerTester(db);
        writeControllerVersion(controller1, Version.fromString("6.2"), db);
        writeControllerVersion(controller2, Version.fromString("6.1"), db);
        writeControllerVersion(controller3, Version.fromString("6.2"), db);
        VersionStatus versionStatus = VersionStatus.compute(tester.controller());
        // While controllers disagree there is no single controller version.
        assertTrue(versionStatus.controllerVersion().isEmpty(), "Controller version is unknown during upgrade");
        writeControllerVersion(controller2, Version.fromString("6.2"), db);
        versionStatus = VersionStatus.compute(tester.controller());
        assertEquals(Version.fromString("6.2"), versionStatus.controllerVersion().get().versionNumber());
    }

    @Test
    public void testSystemVersionNeverShrinks() {
        ControllerTester tester = new ControllerTester();
        Version version0 = Version.fromString("6.2");
        tester.upgradeSystem(version0);
        assertEquals(version0, tester.controller().readSystemVersion());
        // Downgrade one config server per zone to a version older than the current system version.
        Version ancientVersion = Version.fromString("5.1");
        for (ZoneApi zone : tester.controller().zoneRegistry().zones().all().zones()) {
            for (Node node : tester.configServer().nodeRepository().list(zone.getId(), NodeFilter.all().applications(SystemApplication.configServer.id()))) {
                Node downgradedNode = Node.builder(node).currentVersion(ancientVersion).build();
                tester.configServer().nodeRepository().putNodes(zone.getId(), downgradedNode);
                break;
            }
        }
        tester.computeVersionStatus();
        // The recorded system version must not move backwards.
        assertEquals(version0, tester.controller().readSystemVersion());
    }

    @Test
    public void testVersionStatusAfterApplicationUpdates() {
        DeploymentTester tester = new DeploymentTester();
        ApplicationPackage applicationPackage = applicationPackage("default");

        Version version0 = new Version("6.1");
        tester.controllerTester().upgradeSystem(version0);
        var context0 = tester.newDeploymentContext("tenant1", "app0", "default").runJob(JobType.dev("us-east-1"), applicationPackage);

        Version version1 = new Version("6.2");
        Version version2 = new Version("6.3");
        tester.controllerTester().upgradeSystem(version1);
        var context1 = tester.newDeploymentContext("tenant1", "app1", "default").submit(applicationPackage).deploy();
        var context2 = tester.newDeploymentContext("tenant1", "app2", "default").submit(applicationPackage).deploy();
        var context3 = tester.newDeploymentContext("tenant1", "app3", "default").submit(applicationPackage).deploy();

        tester.controllerTester().upgradeSystem(version2);
        tester.upgrader().maintain();
        tester.triggerJobs();
        // Give each application a distinct failure mode on version2.
        context1.timeOutConvergence(systemTest);
        context2.runJob(systemTest)
                .runJob(stagingTest)
                .runJob(productionUsWest1)
                .failDeployment(productionUsEast3);
        context3.timeOutUpgrade(stagingTest);
        tester.triggerJobs();
        tester.controllerTester().computeVersionStatus();

        VersionStatus status = tester.controller().readVersionStatus();
        assertEquals(3, status.versions().size(), "The three versions above exist");

        // Removing the only dev deployment of version0 removes the version from the status.
        tester.controller().applications().deactivate(context0.instanceId(), JobType.dev("us-east-1").zone());
        tester.controllerTester().computeVersionStatus();
        List<VespaVersion> versions = tester.controller().readVersionStatus().versions();
        assertEquals(2, versions.size(), "The two last versions above exist after dev deployment is gone");

        VespaVersion v1 = versions.get(0);
        assertEquals(version1, v1.versionNumber());
        var statistics = DeploymentStatistics.compute(List.of(version1, version2), tester.deploymentStatuses());
        var statistics1 = statistics.get(0);
        assertJobsRun("No runs are failing on version1.",
                      Map.of(context1.instanceId(), List.of(),
                             context2.instanceId(), List.of(),
                             context3.instanceId(), List.of()),
                      statistics1.failingUpgrades());
        assertJobsRun("All applications have at least one active production deployment on version 1.",
                      Map.of(context1.instanceId(), List.of(productionUsWest1, productionUsEast3),
                             context2.instanceId(), List.of(productionUsEast3),
                             context3.instanceId(), List.of(productionUsWest1, productionUsEast3)),
                      statistics1.productionSuccesses());
        assertEquals(List.of(), statistics1.runningUpgrade(), "No applications have active deployment jobs on version1.");

        VespaVersion v2 = versions.get(1);
        assertEquals(version2, v2.versionNumber());
        var statistics2 = statistics.get(1);
        assertJobsRun("All applications have failed on version2 in at least one zone.",
                      Map.of(context1.instanceId(), List.of(systemTest),
                             context2.instanceId(), List.of(productionUsEast3),
                             context3.instanceId(), List.of(stagingTest)),
                      statistics2.failingUpgrades());
        assertJobsRun("Only app2 has successfully deployed to production on version2.",
                      Map.of(context1.instanceId(), List.of(),
                             context2.instanceId(), List.of(productionUsWest1),
                             context3.instanceId(), List.of()),
                      statistics2.productionSuccesses());
        assertJobsRun("All applications are being retried on version2.",
                      Map.of(context1.instanceId(), List.of(systemTest, stagingTest),
                             context2.instanceId(), List.of(productionUsEast3),
                             context3.instanceId(), List.of(systemTest, stagingTest)),
                      statistics2.runningUpgrade());
    }

    /** Asserts that the given runs are exactly the (application, job type) pairs described by {@code jobs}. */
    private static void assertJobsRun(String assertion, Map<ApplicationId, List<JobType>> jobs, List<Run> runs) {
        assertEquals(jobs.entrySet().stream()
                         .flatMap(entry -> entry.getValue().stream().map(type -> new JobId(entry.getKey(), type)))
                         .collect(toSet()),
                     runs.stream()
                         .map(run -> run.id().job())
                         .collect(toSet()),
                     assertion);
    }

    // NOTE(fix): the annotation was duplicated ("@Test @Test"), which does not compile
    // since org.junit.jupiter.api.Test is not repeatable; the duplicate is removed.
    @Test
    public void testConfidenceWithLingeringVersions() {
        DeploymentTester tester = new DeploymentTester().atMondayMorning();
        Version version0 = new Version("6.2");
        tester.controllerTester().upgradeSystem(version0);
        tester.upgrader().maintain();
        var appPackage = applicationPackage("canary");
        var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default")
                            .submit(appPackage)
                            .deploy();
        assertEquals(Confidence.high, confidence(tester.controller(), version0), "All applications running on this version: High");

        Version version1 = new Version("6.3");
        tester.controllerTester().upgradeSystem(version1);
        tester.upgrader().maintain();
        tester.triggerJobs();
        canary0.failDeployment(systemTest);
        canary0.abortJob(stagingTest);
        tester.controllerTester().computeVersionStatus();
        assertEquals(Confidence.broken, confidence(tester.controller(), version1), "One canary failed: Broken");

        Version version2 = new Version("6.4");
        tester.controllerTester().upgradeSystem(version2);
        tester.upgrader().maintain();
        assertEquals(Confidence.broken, confidence(tester.controller(), version1), "Confidence remains unchanged for version1 until app overrides old tests: Broken");
        assertEquals(Confidence.low, confidence(tester.controller(), version2), "Confidence defaults to low for version with no applications");
        assertEquals(version2, canary0.instance().change().platform().orElseThrow());
        canary0.failDeployment(systemTest);
        canary0.abortJob(stagingTest);
        tester.controllerTester().computeVersionStatus();
        assertFalse(tester.controller().readVersionStatus().versions().stream().anyMatch(version -> version.versionNumber().equals(version1)),
                    "Previous version should be forgotten, as canary only had test jobs run on it");
        canary0.runJob(systemTest)
               .runJob(stagingTest)
               .failDeployment(productionUsWest1);
        assertEquals(Confidence.broken, confidence(tester.controller(), version2), "One canary failed: Broken");

        Version version3 = new Version("6.5");
        tester.controllerTester().upgradeSystem(version3);
        tester.upgrader().maintain();
        assertEquals(Confidence.broken, confidence(tester.controller(), version2), "Confidence remains unchanged for version2: Broken");
        assertEquals(Confidence.low, confidence(tester.controller(), version3), "Confidence defaults to low for version with no applications");
        assertEquals(version3, canary0.instance().change().platform().orElseThrow());
        canary0.runJob(systemTest)
               .runJob(stagingTest)
               .failDeployment(productionUsWest1);
        tester.controllerTester().computeVersionStatus();
        assertEquals(Confidence.broken, confidence(tester.controller(), version2), "Confidence remains unchanged for version2: Broken");
        assertEquals(Confidence.broken, confidence(tester.controller(), version3), "Canary broken, so confidence for version3: Broken");

        canary0.runJob(productionUsWest1);
        tester.controllerTester().computeVersionStatus();
        assertFalse(tester.controller().readVersionStatus().versions().stream().anyMatch(version -> version.versionNumber().equals(version2)),
                    "Previous version should be forgotten, as canary only had test jobs run on it");
        assertEquals(Confidence.low, confidence(tester.controller(), version3), "Canary OK, but not done upgrading, so confidence for version3: Low");
    }

    @Test
    public void testConfidenceOverride() {
        DeploymentTester tester = new DeploymentTester();
        Version version0 = new Version("6.2");
        tester.controllerTester().upgradeSystem(version0);
        var app = tester.newDeploymentContext("tenant1", "app1", "default")
                        .submit()
                        .deploy();
        tester.controllerTester().computeVersionStatus();
        assertEquals(Confidence.high, confidence(tester.controller(), version0));

        // A manual override takes precedence over the computed confidence.
        tester.upgrader().overrideConfidence(version0, Confidence.broken);
        tester.controllerTester().computeVersionStatus();
        assertEquals(Confidence.broken, confidence(tester.controller(), version0));

        Version version1 = new Version("6.3");
        tester.controllerTester().upgradeSystem(version1);
        tester.upgrader().maintain();
        app.deployPlatform(version1);
        tester.controllerTester().computeVersionStatus();
        assertEquals(Confidence.high, confidence(tester.controller(), version1));
        // The override for the now-gone version is garbage-collected.
        assertFalse(tester.controller().curator().readConfidenceOverrides().containsKey(version0), "Stale override removed");
    }

    @Test
    public void testCommitDetailsPreservation() {
        HostName controller1 = HostName.of("controller-1");
        HostName controller2 = HostName.of("controller-2");
        HostName controller3 = HostName.of("controller-3");
        MockCuratorDb db = new MockCuratorDb(Stream.of(controller1, controller2, controller3)
                                                   .map(hostName -> hostName.value() + ":2222")
                                                   .collect(Collectors.joining(",")));
        DeploymentTester tester = new DeploymentTester(new ControllerTester(db));

        var version0 = tester.controllerTester().nextVersion();
        var commitSha0 = "badc0ffee";
        var commitDate0 = Instant.EPOCH;
        tester.controllerTester().upgradeSystem(version0);
        assertEquals(version0, tester.controller().readVersionStatus().systemVersion().get().versionNumber());
        assertEquals(commitSha0, tester.controller().readVersionStatus().systemVersion().get().releaseCommit());
        assertEquals(commitDate0, tester.controller().readVersionStatus().systemVersion().get().committedAt());
        tester.newDeploymentContext().submit().deploy();

        var version1 = tester.controllerTester().nextVersion();
        var commitSha1 = "deadbeef";
        var commitDate1 = Instant.ofEpochMilli(123);
        tester.controllerTester().upgradeController(version1, commitSha1, commitDate1);
        tester.controllerTester().upgradeSystemApplications(version1);
        assertEquals(version1, tester.controller().readVersionStatus().systemVersion().get().versionNumber());
        assertEquals(commitSha1, tester.controller().readVersionStatus().systemVersion().get().releaseCommit());
        assertEquals(commitDate1, tester.controller().readVersionStatus().systemVersion().get().committedAt());
        // Commit details of the previous version remain available.
        assertEquals(commitSha0, tester.controller().readVersionStatus().version(version0).releaseCommit());
        assertEquals(commitDate0, tester.controller().readVersionStatus().version(version0).committedAt());
    }

    @Test
    public void testConfidenceChangeRespectsTimeWindow() {
        DeploymentTester tester = new DeploymentTester().atMondayMorning();
        Version version0 = Version.fromString("7.1");
        tester.controllerTester().upgradeSystem(version0);
        var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default")
                            .submit(applicationPackage("canary"))
                            .deploy();
        var canary1 = tester.newDeploymentContext("tenant1", "canary1", "default")
                            .submit(applicationPackage("canary"))
                            .deploy();
        var default0 = tester.newDeploymentContext("tenant1", "default0", "default")
                             .submit(applicationPackage("default"))
                             .deploy();
        tester.controllerTester().computeVersionStatus();
        assertSame(Confidence.high, tester.controller().readVersionStatus().version(version0).confidence());

        Version version1 = Version.fromString("7.2");
        tester.controllerTester().upgradeSystem(version1);
        tester.upgrader().maintain();
        canary0.deployPlatform(version1);
        tester.controllerTester().computeVersionStatus();
        assertSame(Confidence.low, tester.controller().readVersionStatus().version(version1).confidence());

        assertEquals(12, tester.controllerTester().hourOfDayAfter(Duration.ofHours(7)));
        canary1.failDeployment(systemTest);
        tester.controllerTester().computeVersionStatus();
        assertSame(Confidence.broken, tester.controller().readVersionStatus().version(version1).confidence());

        // Outside the allowed window: confidence may not be raised despite the successful deployment.
        assertEquals(20, tester.controllerTester().hourOfDayAfter(Duration.ofHours(8)));
        canary1.deployPlatform(version1);
        tester.controllerTester().computeVersionStatus();
        assertSame(Confidence.broken, tester.controller().readVersionStatus().version(version1).confidence());

        assertEquals(5, tester.controllerTester().hourOfDayAfter(Duration.ofHours(9)));
        tester.controllerTester().computeVersionStatus();
        assertSame(Confidence.normal, tester.controller().readVersionStatus().version(version1).confidence());
        tester.upgrader().maintain();
        tester.triggerJobs();
        default0.deployPlatform(version1);

        Version version2 = Version.fromString("7.3");
        tester.controllerTester().upgradeSystem(version2);
        tester.upgrader().maintain();
        assertEquals(14, tester.controllerTester().hourOfDayAfter(Duration.ofHours(9)));
        canary0.deployPlatform(version2);
        canary1.deployPlatform(version2);
        tester.controllerTester().computeVersionStatus();
        assertSame(Confidence.low, tester.controller().readVersionStatus().version(version2).confidence());

        // Overrides apply immediately, regardless of the time window.
        tester.upgrader().overrideConfidence(version2, Confidence.normal);
        tester.controllerTester().computeVersionStatus();
        assertSame(Confidence.normal, tester.controller().readVersionStatus().version(version2).confidence());
        tester.upgrader().overrideConfidence(version2, Confidence.low);
        tester.controllerTester().computeVersionStatus();
        assertSame(Confidence.low, tester.controller().readVersionStatus().version(version2).confidence());
        tester.upgrader().removeConfidenceOverride(version2);

        assertEquals(7, tester.controllerTester().hourOfDayAfter(Duration.ofHours(17)));
        tester.controllerTester().computeVersionStatus();
        assertSame(Confidence.normal, tester.controller().readVersionStatus().version(version2).confidence());
        tester.upgrader().maintain();
        tester.triggerJobs();
        default0.deployPlatform(version2);
    }

    @Test
    public void testStatusIncludesIncompleteUpgrades() {
        var tester = new DeploymentTester().atMondayMorning();
        var version0 = Version.fromString("7.1");
        tester.controllerTester().upgradeSystem(version0);
        var context = tester.newDeploymentContext("tenant1", "default0", "default");
        context.submit(applicationPackage("default")).deploy();

        var version1 = Version.fromString("7.2");
        tester.controllerTester().upgradeSystem(version1);
        tester.upgrader().maintain();
        context.runJob(systemTest)
               .runJob(stagingTest)
               .failDeployment(productionUsWest1);
        tester.controllerTester().computeVersionStatus();
        for (var version : List.of(version0, version1)) {
            assertOnVersion(version, context.instanceId(), tester);
        }

        var version2 = Version.fromString("7.3");
        tester.controllerTester().upgradeSystem(version2);
        tester.upgrader().maintain();
        context.runJob(systemTest)
               .runJob(stagingTest)
               .failDeployment(productionUsWest1);
        tester.controllerTester().computeVersionStatus();
        for (var version : List.of(version0, version1, version2)) {
            assertOnVersion(version, context.instanceId(), tester);
        }

        // Completing the upgrade collapses the status to the single current version.
        context.deployPlatform(version2);
        tester.controllerTester().computeVersionStatus();
        assertEquals(1, tester.controller().readVersionStatus().versions().size());
        assertOnVersion(version2, context.instanceId(), tester);

        var version3 = Version.fromString("7.4");
        tester.controllerTester().upgradeSystem(version3);
        tester.upgrader().maintain();
        context.runJob(systemTest)
               .runJob(stagingTest)
               .failDeployment(productionUsWest1);
        tester.controllerTester().computeVersionStatus();
        assertEquals(2, tester.controller().readVersionStatus().versions().size());
        for (var version : List.of(version2, version3)) {
            assertOnVersion(version, context.instanceId(), tester);
        }
    }

    /** Asserts that the given instance appears in the deployment statistics of the given version. */
    private void assertOnVersion(Version version, ApplicationId instance, DeploymentTester tester) {
        var vespaVersion = tester.controller().readVersionStatus().version(version);
        assertNotNull(vespaVersion, "Statistics for version " + version + " exist");
        var statistics = DeploymentStatistics.compute(List.of(version), tester.deploymentStatuses()).get(0);
        assertTrue(Stream.of(statistics.productionSuccesses(), statistics.failingUpgrades(), statistics.runningUpgrade())
                         .anyMatch(runs -> runs.stream().anyMatch(run -> run.id().application().equals(instance))),
                   "Application is on version " + version);
    }

    private static void writeControllerVersion(HostName hostname, Version version, CuratorDb db) {
        db.writeControllerVersion(hostname, new ControllerVersion(version, "badc0ffee", Instant.EPOCH));
    }

    /** Returns the confidence of the given version, failing if the version is unknown. */
    private Confidence confidence(Controller controller, Version version) {
        return controller.readVersionStatus().versions().stream()
                         .filter(v -> v.versionNumber().equals(version))
                         .findFirst()
                         .map(VespaVersion::confidence)
                         .orElseThrow(() -> new IllegalArgumentException("Expected to find version: " + version));
    }

    private static ApplicationPackage applicationPackage(String upgradePolicy, int majorVersion) {
        return new ApplicationPackageBuilder().upgradePolicy(upgradePolicy)
                                              .region("us-west-1")
                                              .region("us-east-3")
                                              .majorVersion(majorVersion)
                                              .build();
    }

    private static final ApplicationPackage canaryApplicationPackage =
            new ApplicationPackageBuilder().upgradePolicy("canary")
                                           .region("us-west-1")
                                           .region("us-east-3")
                                           .build();

    private static final ApplicationPackage defaultApplicationPackage =
            new ApplicationPackageBuilder().upgradePolicy("default")
                                           .region("us-west-1")
                                           .region("us-east-3")
                                           .build();

    private static final ApplicationPackage conservativeApplicationPackage =
            new ApplicationPackageBuilder().upgradePolicy("conservative")
                                           .region("us-west-1")
                                           .region("us-east-3")
                                           .build();

    /** Returns empty prebuilt applications for efficiency */
    private ApplicationPackage applicationPackage(String upgradePolicy) {
        return switch (upgradePolicy) {
            case "canary" -> canaryApplicationPackage;
            case "default" -> defaultApplicationPackage;
            case "conservative" -> conservativeApplicationPackage;
            default -> throw new IllegalArgumentException("No upgrade policy '" + upgradePolicy + "'");
        };
    }

}
class VersionStatusTest { @Test public void testEmptyVersionStatus() { VersionStatus status = VersionStatus.empty(); assertFalse(status.systemVersion().isPresent()); assertTrue(status.versions().isEmpty()); } @Test public void testSystemVersionIsControllerVersionIfConfigServersAreNewer() { ControllerTester tester = new ControllerTester(); Version controllerVersion = tester.controller().readVersionStatus().controllerVersion().get().versionNumber(); Version largerThanCurrent = new Version(controllerVersion.getMajor() + 1); tester.upgradeSystemApplications(largerThanCurrent); VersionStatus versionStatus = VersionStatus.compute(tester.controller()); assertEquals(controllerVersion, versionStatus.systemVersion().get().versionNumber()); } @Test public void testSystemVersionIsVersionOfOldestConfigServer() { ControllerTester tester = new ControllerTester(); Version version0 = Version.fromString("6.1"); Version version1 = Version.fromString("6.5"); for (ZoneApi zone : tester.zoneRegistry().zones().all().zones()) { for (Node node : tester.configServer().nodeRepository().list(zone.getId(), NodeFilter.all().applications(SystemApplication.configServer.id()))) { Node upgradedNode = Node.builder(node).currentVersion(version1).build(); tester.configServer().nodeRepository().putNodes(zone.getId(), upgradedNode); break; } } VersionStatus versionStatus = VersionStatus.compute(tester.controller()); assertEquals(version0, versionStatus.systemVersion().get().versionNumber()); } @Test public void testControllerVersion() { HostName controller1 = HostName.of("controller-1"); HostName controller2 = HostName.of("controller-2"); HostName controller3 = HostName.of("controller-3"); MockCuratorDb db = new MockCuratorDb(Stream.of(controller1, controller2, controller3) .map(hostName -> hostName.value() + ":2222") .collect(Collectors.joining(","))); ControllerTester tester = new ControllerTester(db); writeControllerVersion(controller1, Version.fromString("6.2"), db); 
writeControllerVersion(controller2, Version.fromString("6.1"), db); writeControllerVersion(controller3, Version.fromString("6.2"), db); VersionStatus versionStatus = VersionStatus.compute(tester.controller()); assertTrue(versionStatus.controllerVersion().isEmpty(), "Controller version is unknown during upgrade"); writeControllerVersion(controller2, Version.fromString("6.2"), db); versionStatus = VersionStatus.compute(tester.controller()); assertEquals(Version.fromString("6.2"), versionStatus.controllerVersion().get().versionNumber()); } @Test public void testSystemVersionNeverShrinks() { ControllerTester tester = new ControllerTester(); Version version0 = Version.fromString("6.2"); tester.upgradeSystem(version0); assertEquals(version0, tester.controller().readSystemVersion()); Version ancientVersion = Version.fromString("5.1"); for (ZoneApi zone : tester.controller().zoneRegistry().zones().all().zones()) { for (Node node : tester.configServer().nodeRepository().list(zone.getId(), NodeFilter.all().applications(SystemApplication.configServer.id()))) { Node downgradedNode = Node.builder(node).currentVersion(ancientVersion).build(); tester.configServer().nodeRepository().putNodes(zone.getId(), downgradedNode); break; } } tester.computeVersionStatus(); assertEquals(version0, tester.controller().readSystemVersion()); } @Test public void testVersionStatusAfterApplicationUpdates() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = applicationPackage("default"); Version version0 = new Version("6.1"); tester.controllerTester().upgradeSystem(version0); var context0 = tester.newDeploymentContext("tenant1", "app0", "default").runJob(JobType.dev("us-east-1"), applicationPackage); Version version1 = new Version("6.2"); Version version2 = new Version("6.3"); tester.controllerTester().upgradeSystem(version1); var context1 = tester.newDeploymentContext("tenant1", "app1", "default").submit(applicationPackage).deploy(); var context2 = 
tester.newDeploymentContext("tenant1", "app2", "default").submit(applicationPackage).deploy(); var context3 = tester.newDeploymentContext("tenant1", "app3", "default").submit(applicationPackage).deploy(); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); tester.triggerJobs(); context1.timeOutConvergence(systemTest); context2.runJob(systemTest) .runJob(stagingTest) .runJob(productionUsWest1) .failDeployment(productionUsEast3); context3.timeOutUpgrade(stagingTest); tester.triggerJobs(); tester.controllerTester().computeVersionStatus(); VersionStatus status = tester.controller().readVersionStatus(); assertEquals(3, status.versions().size(), "The three versions above exist"); tester.controller().applications().deactivate(context0.instanceId(), JobType.dev("us-east-1").zone()); tester.controllerTester().computeVersionStatus(); List<VespaVersion> versions = tester.controller().readVersionStatus().versions(); assertEquals(2, versions.size(), "The two last versions above exist after dev deployment is gone"); VespaVersion v1 = versions.get(0); assertEquals(version1, v1.versionNumber()); var statistics = DeploymentStatistics.compute(List.of(version1, version2), tester.deploymentStatuses()); var statistics1 = statistics.get(0); assertJobsRun("No runs are failing on version1.", Map.of(context1.instanceId(), List.of(), context2.instanceId(), List.of(), context3.instanceId(), List.of()), statistics1.failingUpgrades()); assertJobsRun("All applications have at least one active production deployment on version 1.", Map.of(context1.instanceId(), List.of(productionUsWest1, productionUsEast3), context2.instanceId(), List.of(productionUsEast3), context3.instanceId(), List.of(productionUsWest1, productionUsEast3)), statistics1.productionSuccesses()); assertEquals( List.of(), statistics1.runningUpgrade(), "No applications have active deployment jobs on version1."); VespaVersion v2 = versions.get(1); assertEquals(version2, v2.versionNumber()); var 
statistics2 = statistics.get(1); assertJobsRun("All applications have failed on version2 in at least one zone.", Map.of(context1.instanceId(), List.of(systemTest), context2.instanceId(), List.of(productionUsEast3), context3.instanceId(), List.of(stagingTest)), statistics2.failingUpgrades()); assertJobsRun("Only app2 has successfully deployed to production on version2.", Map.of(context1.instanceId(), List.of(), context2.instanceId(), List.of(productionUsWest1), context3.instanceId(), List.of()), statistics2.productionSuccesses()); assertJobsRun("All applications are being retried on version2.", Map.of(context1.instanceId(), List.of(systemTest, stagingTest), context2.instanceId(), List.of(productionUsEast3), context3.instanceId(), List.of(systemTest, stagingTest)), statistics2.runningUpgrade()); } private static void assertJobsRun(String assertion, Map<ApplicationId, List<JobType>> jobs, List<Run> runs) { assertEquals(jobs.entrySet().stream() .flatMap(entry -> entry.getValue().stream().map(type -> new JobId(entry.getKey(), type))) .collect(toSet()), runs.stream() .map(run -> run.id().job()) .collect(toSet()), assertion); } @Test @Test public void testConfidenceWithLingeringVersions() { DeploymentTester tester = new DeploymentTester().atMondayMorning(); Version version0 = new Version("6.2"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().maintain(); var appPackage = applicationPackage("canary"); var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default") .submit(appPackage) .deploy(); assertEquals( Confidence.high, confidence(tester.controller(), version0), "All applications running on this version: High"); Version version1 = new Version("6.3"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); tester.triggerJobs(); canary0.failDeployment(systemTest); canary0.abortJob(stagingTest); tester.controllerTester().computeVersionStatus(); assertEquals( Confidence.broken, confidence(tester.controller(), 
version1), "One canary failed: Broken"); Version version2 = new Version("6.4"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); assertEquals( Confidence.broken, confidence(tester.controller(), version1), "Confidence remains unchanged for version1 until app overrides old tests: Broken"); assertEquals( Confidence.low, confidence(tester.controller(), version2), "Confidence defaults to low for version with no applications"); assertEquals(version2, canary0.instance().change().platform().orElseThrow()); canary0.failDeployment(systemTest); canary0.abortJob(stagingTest); tester.controllerTester().computeVersionStatus(); assertFalse( tester.controller().readVersionStatus().versions().stream().anyMatch(version -> version.versionNumber().equals(version1)), "Previous version should be forgotten, as canary only had test jobs run on it"); canary0.runJob(systemTest) .runJob(stagingTest) .failDeployment(productionUsWest1); assertEquals( Confidence.broken, confidence(tester.controller(), version2), "One canary failed: Broken"); Version version3 = new Version("6.5"); tester.controllerTester().upgradeSystem(version3); tester.upgrader().maintain(); assertEquals( Confidence.broken, confidence(tester.controller(), version2), "Confidence remains unchanged for version2: Broken"); assertEquals( Confidence.low, confidence(tester.controller(), version3), "Confidence defaults to low for version with no applications"); assertEquals(version3, canary0.instance().change().platform().orElseThrow()); canary0.runJob(systemTest) .runJob(stagingTest) .failDeployment(productionUsWest1); tester.controllerTester().computeVersionStatus(); assertEquals( Confidence.broken, confidence(tester.controller(), version2), "Confidence remains unchanged for version2: Broken"); assertEquals( Confidence.broken, confidence(tester.controller(), version3), "Canary broken, so confidence for version3: Broken"); canary0.runJob(productionUsWest1); 
tester.controllerTester().computeVersionStatus(); assertFalse( tester.controller().readVersionStatus().versions().stream().anyMatch(version -> version.versionNumber().equals(version2)), "Previous version should be forgotten, as canary only had test jobs run on it"); assertEquals( Confidence.low, confidence(tester.controller(), version3), "Canary OK, but not done upgrading, so confidence for version3: Low"); } @Test public void testConfidenceOverride() { DeploymentTester tester = new DeploymentTester(); Version version0 = new Version("6.2"); tester.controllerTester().upgradeSystem(version0); var app = tester.newDeploymentContext("tenant1", "app1", "default") .submit() .deploy(); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version0)); tester.upgrader().overrideConfidence(version0, Confidence.broken); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.broken, confidence(tester.controller(), version0)); Version version1 = new Version("6.3"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.deployPlatform(version1); tester.controllerTester().computeVersionStatus(); assertEquals(Confidence.high, confidence(tester.controller(), version1)); assertFalse( tester.controller().curator().readConfidenceOverrides() .containsKey(version0), "Stale override removed"); } @Test public void testCommitDetailsPreservation() { HostName controller1 = HostName.of("controller-1"); HostName controller2 = HostName.of("controller-2"); HostName controller3 = HostName.of("controller-3"); MockCuratorDb db = new MockCuratorDb(Stream.of(controller1, controller2, controller3) .map(hostName -> hostName.value() + ":2222") .collect(Collectors.joining(","))); DeploymentTester tester = new DeploymentTester(new ControllerTester(db)); var version0 = tester.controllerTester().nextVersion(); var commitSha0 = "badc0ffee"; var commitDate0 = Instant.EPOCH; 
tester.controllerTester().upgradeSystem(version0); assertEquals(version0, tester.controller().readVersionStatus().systemVersion().get().versionNumber()); assertEquals(commitSha0, tester.controller().readVersionStatus().systemVersion().get().releaseCommit()); assertEquals(commitDate0, tester.controller().readVersionStatus().systemVersion().get().committedAt()); tester.newDeploymentContext().submit().deploy(); var version1 = tester.controllerTester().nextVersion(); var commitSha1 = "deadbeef"; var commitDate1 = Instant.ofEpochMilli(123); tester.controllerTester().upgradeController(version1, commitSha1, commitDate1); tester.controllerTester().upgradeSystemApplications(version1); assertEquals(version1, tester.controller().readVersionStatus().systemVersion().get().versionNumber()); assertEquals(commitSha1, tester.controller().readVersionStatus().systemVersion().get().releaseCommit()); assertEquals(commitDate1, tester.controller().readVersionStatus().systemVersion().get().committedAt()); assertEquals(commitSha0, tester.controller().readVersionStatus().version(version0).releaseCommit()); assertEquals(commitDate0, tester.controller().readVersionStatus().version(version0).committedAt()); } @Test public void testConfidenceChangeRespectsTimeWindow() { DeploymentTester tester = new DeploymentTester().atMondayMorning(); Version version0 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version0); var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default") .submit(applicationPackage("canary")) .deploy(); var canary1 = tester.newDeploymentContext("tenant1", "canary1", "default") .submit(applicationPackage("canary")) .deploy(); var default0 = tester.newDeploymentContext("tenant1", "default0", "default") .submit(applicationPackage("default")) .deploy(); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.high, tester.controller().readVersionStatus().version(version0).confidence()); Version version1 = Version.fromString("7.2"); 
tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); canary0.deployPlatform(version1); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.low, tester.controller().readVersionStatus().version(version1).confidence()); assertEquals(12, tester.controllerTester().hourOfDayAfter(Duration.ofHours(7))); canary1.failDeployment(systemTest); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.broken, tester.controller().readVersionStatus().version(version1).confidence()); assertEquals(20, tester.controllerTester().hourOfDayAfter(Duration.ofHours(8))); canary1.deployPlatform(version1); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.broken, tester.controller().readVersionStatus().version(version1).confidence()); assertEquals(5, tester.controllerTester().hourOfDayAfter(Duration.ofHours(9))); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.normal, tester.controller().readVersionStatus().version(version1).confidence()); tester.upgrader().maintain(); tester.triggerJobs(); default0.deployPlatform(version1); Version version2 = Version.fromString("7.3"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); assertEquals(14, tester.controllerTester().hourOfDayAfter(Duration.ofHours(9))); canary0.deployPlatform(version2); canary1.deployPlatform(version2); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.low, tester.controller().readVersionStatus().version(version2).confidence()); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.normal, tester.controller().readVersionStatus().version(version2).confidence()); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.low, tester.controller().readVersionStatus().version(version2).confidence()); 
tester.upgrader().removeConfidenceOverride(version2); assertEquals(7, tester.controllerTester().hourOfDayAfter(Duration.ofHours(17))); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.normal, tester.controller().readVersionStatus().version(version2).confidence()); tester.upgrader().maintain(); tester.triggerJobs(); default0.deployPlatform(version2); } @Test public void testStatusIncludesIncompleteUpgrades() { var tester = new DeploymentTester().atMondayMorning(); var version0 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version0); var context = tester.newDeploymentContext("tenant1", "default0", "default"); context.submit(applicationPackage("default")).deploy(); var version1 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); context.runJob(systemTest) .runJob(stagingTest) .failDeployment(productionUsWest1); tester.controllerTester().computeVersionStatus(); for (var version : List.of(version0, version1)) { assertOnVersion(version, context.instanceId(), tester); } var version2 = Version.fromString("7.3"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); context.runJob(systemTest) .runJob(stagingTest) .failDeployment(productionUsWest1); tester.controllerTester().computeVersionStatus(); for (var version : List.of(version0, version1, version2)) { assertOnVersion(version, context.instanceId(), tester); } context.deployPlatform(version2); tester.controllerTester().computeVersionStatus(); assertEquals(1, tester.controller().readVersionStatus().versions().size()); assertOnVersion(version2, context.instanceId(), tester); var version3 = Version.fromString("7.4"); tester.controllerTester().upgradeSystem(version3); tester.upgrader().maintain(); context.runJob(systemTest) .runJob(stagingTest) .failDeployment(productionUsWest1); tester.controllerTester().computeVersionStatus(); assertEquals(2, 
tester.controller().readVersionStatus().versions().size()); for (var version : List.of(version2, version3)) { assertOnVersion(version, context.instanceId(), tester); } } private void assertOnVersion(Version version, ApplicationId instance, DeploymentTester tester) { var vespaVersion = tester.controller().readVersionStatus().version(version); assertNotNull(vespaVersion, "Statistics for version " + version + " exist"); var statistics = DeploymentStatistics.compute(List.of(version), tester.deploymentStatuses()).get(0); assertTrue( Stream.of(statistics.productionSuccesses(), statistics.failingUpgrades(), statistics.runningUpgrade()) .anyMatch(runs -> runs.stream().anyMatch(run -> run.id().application().equals(instance))), "Application is on version " + version); } private static void writeControllerVersion(HostName hostname, Version version, CuratorDb db) { db.writeControllerVersion(hostname, new ControllerVersion(version, "badc0ffee", Instant.EPOCH)); } private Confidence confidence(Controller controller, Version version) { return controller.readVersionStatus().versions().stream() .filter(v -> v.versionNumber().equals(version)) .findFirst() .map(VespaVersion::confidence) .orElseThrow(() -> new IllegalArgumentException("Expected to find version: " + version)); } private static ApplicationPackage applicationPackage(String upgradePolicy, int majorVersion) { return new ApplicationPackageBuilder().upgradePolicy(upgradePolicy) .region("us-west-1") .region("us-east-3") .majorVersion(majorVersion) .build(); } private static final ApplicationPackage canaryApplicationPackage = new ApplicationPackageBuilder().upgradePolicy("canary") .region("us-west-1") .region("us-east-3") .build(); private static final ApplicationPackage defaultApplicationPackage = new ApplicationPackageBuilder().upgradePolicy("default") .region("us-west-1") .region("us-east-3") .build(); private static final ApplicationPackage conservativeApplicationPackage = new 
ApplicationPackageBuilder().upgradePolicy("conservative") .region("us-west-1") .region("us-east-3") .build(); /** Returns empty prebuilt applications for efficiency */ private ApplicationPackage applicationPackage(String upgradePolicy) { return switch (upgradePolicy) { case "canary" -> canaryApplicationPackage; case "default" -> defaultApplicationPackage; case "conservative" -> conservativeApplicationPackage; default -> throw new IllegalArgumentException("No upgrade policy '" + upgradePolicy + "'"); }; } }
Just `PARALLEL` ?
public void setExecutionMode(String mode) { if ("parallel".equalsIgnoreCase(mode)) { executionMode = OrtSession.SessionOptions.ExecutionMode.PARALLEL; } else if ("sequential".equalsIgnoreCase(mode)) { executionMode = SEQUENTIAL; } }
executionMode = OrtSession.SessionOptions.ExecutionMode.PARALLEL;
public void setExecutionMode(String mode) { if ("parallel".equalsIgnoreCase(mode)) { executionMode = OrtSession.SessionOptions.ExecutionMode.PARALLEL; } else if ("sequential".equalsIgnoreCase(mode)) { executionMode = SEQUENTIAL; } }
class OnnxEvaluatorOptions { private final OrtSession.SessionOptions.OptLevel optimizationLevel; private OrtSession.SessionOptions.ExecutionMode executionMode; private int interOpThreads; private int intraOpThreads; private int gpuDeviceNumber; private boolean gpuDeviceRequired; public OnnxEvaluatorOptions() { optimizationLevel = OrtSession.SessionOptions.OptLevel.ALL_OPT; executionMode = SEQUENTIAL; int quarterVcpu = Math.max(1, (int) Math.ceil(Runtime.getRuntime().availableProcessors() / 4d)); interOpThreads = quarterVcpu; intraOpThreads = quarterVcpu; gpuDeviceNumber = -1; gpuDeviceRequired = false; } public OrtSession.SessionOptions getOptions(boolean loadCuda) throws OrtException { OrtSession.SessionOptions options = new OrtSession.SessionOptions(); options.setOptimizationLevel(optimizationLevel); options.setExecutionMode(executionMode); options.setInterOpNumThreads(executionMode == PARALLEL ? interOpThreads : 1); options.setIntraOpNumThreads(intraOpThreads); if (loadCuda) { options.addCUDA(gpuDeviceNumber); } return options; } public void setInterOpThreads(int threads) { if (threads >= 0) { interOpThreads = threads; } } public void setIntraOpThreads(int threads) { if (threads >= 0) { intraOpThreads = threads; } } public void setGpuDevice(int deviceNumber, boolean required) { this.gpuDeviceNumber = deviceNumber; this.gpuDeviceRequired = required; } public boolean requestingGpu() { return gpuDeviceNumber > -1; } public boolean gpuDeviceRequired() { return gpuDeviceRequired; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; OnnxEvaluatorOptions that = (OnnxEvaluatorOptions) o; return interOpThreads == that.interOpThreads && intraOpThreads == that.intraOpThreads && gpuDeviceNumber == that.gpuDeviceNumber && gpuDeviceRequired == that.gpuDeviceRequired && optimizationLevel == that.optimizationLevel && executionMode == that.executionMode; } @Override public int hashCode() { return 
Objects.hash(optimizationLevel, executionMode, interOpThreads, intraOpThreads, gpuDeviceNumber, gpuDeviceRequired); } }
class OnnxEvaluatorOptions { private final OrtSession.SessionOptions.OptLevel optimizationLevel; private OrtSession.SessionOptions.ExecutionMode executionMode; private int interOpThreads; private int intraOpThreads; private int gpuDeviceNumber; private boolean gpuDeviceRequired; public OnnxEvaluatorOptions() { optimizationLevel = OrtSession.SessionOptions.OptLevel.ALL_OPT; executionMode = SEQUENTIAL; int quarterVcpu = Math.max(1, (int) Math.ceil(Runtime.getRuntime().availableProcessors() / 4d)); interOpThreads = quarterVcpu; intraOpThreads = quarterVcpu; gpuDeviceNumber = -1; gpuDeviceRequired = false; } public OrtSession.SessionOptions getOptions(boolean loadCuda) throws OrtException { OrtSession.SessionOptions options = new OrtSession.SessionOptions(); options.setOptimizationLevel(optimizationLevel); options.setExecutionMode(executionMode); options.setInterOpNumThreads(executionMode == PARALLEL ? interOpThreads : 1); options.setIntraOpNumThreads(intraOpThreads); if (loadCuda) { options.addCUDA(gpuDeviceNumber); } return options; } public void setInterOpThreads(int threads) { if (threads >= 0) { interOpThreads = threads; } } public void setIntraOpThreads(int threads) { if (threads >= 0) { intraOpThreads = threads; } } public void setGpuDevice(int deviceNumber, boolean required) { this.gpuDeviceNumber = deviceNumber; this.gpuDeviceRequired = required; } public boolean requestingGpu() { return gpuDeviceNumber > -1; } public boolean gpuDeviceRequired() { return gpuDeviceRequired; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; OnnxEvaluatorOptions that = (OnnxEvaluatorOptions) o; return interOpThreads == that.interOpThreads && intraOpThreads == that.intraOpThreads && gpuDeviceNumber == that.gpuDeviceNumber && gpuDeviceRequired == that.gpuDeviceRequired && optimizationLevel == that.optimizationLevel && executionMode == that.executionMode; } @Override public int hashCode() { return 
Objects.hash(optimizationLevel, executionMode, interOpThreads, intraOpThreads, gpuDeviceNumber, gpuDeviceRequired); } }
Consider using `raw.toString()`
private static String getIdValue(ValueGroupId<?> id) { return (id instanceof RawId raw) ? Base64.getEncoder().withoutPadding().encodeToString(raw.getValue()) : id.getValue().toString(); }
? Base64.getEncoder().withoutPadding().encodeToString(raw.getValue())
private static String getIdValue(ValueGroupId<?> id) { return (id instanceof RawId raw) ? Base64.getEncoder().withoutPadding().encodeToString(raw.getValue()) : id.getValue().toString(); }
class of com.yahoo.search.result.HitGroup, got %s.", list.getClass()); moreChildren(); renderHitGroupHead((HitGroup) list); } protected void moreChildren() throws IOException { if (!renderedChildren.isEmpty()) childrenArray(); renderedChildren.push(0); }
class of com.yahoo.search.result.HitGroup, got %s.", list.getClass()); moreChildren(); renderHitGroupHead((HitGroup) list); } protected void moreChildren() throws IOException { if (!renderedChildren.isEmpty()) childrenArray(); renderedChildren.push(0); }
:raised_eyebrow:
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); }
ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase());
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); }
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e); default -> new 
ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), 
request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), 
Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return 
searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), 
path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return 
approveAccessRequest(path.get("tenant"), request); // tail of the PUT dispatcher (method starts above this chunk): approve/deny a pending access request
        // Remaining PUT routes: managed access, tenant info updates, archive access, secret stores, global rotation overrides.
        // Matching is first-wins; unmatched paths fall through to the 404 below.
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        // Both path orderings (.../instance/.../environment/.../region/... and .../environment/.../region/.../instance/...) are accepted.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /**
     * Dispatches POST requests on the application/v4 API.
     * Matches the path against route templates in declaration order (first match wins) and delegates
     * to the corresponding handler; unmatched paths yield a 404 error response.
     */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        // "default" instance variants; the boolean argument is passed through to deployPlatform/deployApplication
        // ("pin" routes pass true, plain routes pass false).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        // Explicit-instance variants of the deploying routes above.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        // Zone-scoped operations on a deployment.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        // Alternate path ordering: environment/region before instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /**
     * Dispatches PATCH requests. Both the application-level and instance-level routes delegate to the
     * same patchApplication handler (the instance segment is not forwarded).
     */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /**
     * Dispatches DELETE requests on the application/v4 API (first path match wins; continues below).
     */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return
removeManagedAccess(path.get("tenant")); // continuation of handleDELETE's route dispatch
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        // cancelDeploy takes (tenant, application, instance, choice); "default" instance, "all" cancels every change.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        // Zone-scoped DELETE operations on a deployment.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Alternate path ordering: environment/region before instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Answers OPTIONS requests with an empty body and an Allow header listing the supported methods. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /**
     * Serializes every (optionally including deleted) tenant, with that tenant's applications, into a
     * JSON array — the recursive form of the API root listing.
     */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications = controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            // Only this tenant's applications are passed along for serialization.
            toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request);
        return new SlimeJsonResponse(slime);
    }

    /** API root: full recursive listing when recursion is requested, otherwise just a link to "tenant". */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request) ?
recursiveRoot(request) : new ResourceResponse(request, "tenant"); // tail of root(): non-recursive branch
    }

    /** Lists all (optionally including deleted) tenants as a JSON array of brief tenant entries. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Looks up the named tenant and serializes it, or returns 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Serializes a single tenant together with all its applications. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns the access-request state for a cloud tenant: the managed-access flag, any pending
     * membership request, and the audit log. Cloud tenants only.
     */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var slime = new Slime();
        var cursor = slime.setObject();
        try {
            var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
            var managedAccess = accessControlService.getManagedAccess(tenant);
            cursor.setBool("managedAccess", managedAccess);
            accessRoleInformation.getPendingRequest()
                    .ifPresent(membershipRequest -> {
                        var requestCursor = cursor.setObject("pendingRequest");
                        requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                        requestCursor.setString("reason", membershipRequest.getReason());
                    });
            var auditLogCursor = cursor.setArray("auditLog");
            accessRoleInformation.getAuditLog()
                    .forEach(auditLogEntry -> {
                        var entryCursor = auditLogCursor.addObject();
                        entryCursor.setString("created", auditLogEntry.getCreationTime());
                        entryCursor.setString("approver", auditLogEntry.getApprover());
                        entryCursor.setString("reason", auditLogEntry.getReason());
                        entryCursor.setString("status", auditLogEntry.getAction());
                    });
        } catch (ZmsClientException e) {
            // 404 from ZMS is treated as "no managed access configured"; other errors propagate.
            if (e.getErrorCode() == 404) cursor.setBool("managedAccess", false);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Requests ssh access to a cloud tenant's resources. Restricted to operators. */
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    /**
     * Approves or rejects a pending ssh access request for a cloud tenant.
     * Request body fields: "approve" (boolean) and optional "expiry" (epoch millis; defaults to now + 1 day).
     */
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        var expiry = inspector.field("expiry").valid() ? Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS);
        var approve = inspector.field("approve").asBool();
        controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
        return new MessageResponse("OK");
    }

    /** Enables managed (operator) access for the tenant. */
    private HttpResponse addManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, true);
    }

    /** Disables managed (operator) access for the tenant. */
    private HttpResponse removeManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, false);
    }

    /** Sets the managed-access flag for a cloud tenant and echoes the new value as JSON. */
    private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            // NOTE(review): "privel" looks like a typo for "privileges" — runtime string kept as-is here.
            return ErrorResponse.badRequest("Can only set access privel for cloud tenants");
        try {
            controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
            var slime = new Slime();
            slime.setObject().setBool("managedAccess", managedAccess);
            return new SlimeJsonResponse(slime);
        } catch (ZmsClientException e) {
            if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes");
            throw e;
        }
    }

    /** Serializes the tenant info of a cloud tenant, or 404 for unknown/non-cloud tenants. */
    private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Applies the handler to the named tenant if it exists and is a cloud tenant; 404 otherwise. */
    private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
        return controller.tenants().get(TenantName.from(tenantName))
                .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                .map(tenant -> handler.apply((CloudTenant) tenant))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Serializes a tenant's info (name, email, website, contact, address, billing, contacts) — empty object when unset. */
    private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
        Slime slime = new Slime();
        Cursor infoCursor = slime.setObject();
        if (!info.isEmpty()) {
            infoCursor.setString("name", info.name());
            infoCursor.setString("email", info.email());
            infoCursor.setString("website", info.website());
            infoCursor.setString("contactName", info.contact().name());
            infoCursor.setString("contactEmail", info.contact().email().getEmailAddress());
            infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified());
            toSlime(info.address(), infoCursor);
            toSlime(info.billingContact(), infoCursor);
            toSlime(info.contacts(), infoCursor);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Serializes the "profile" view of a cloud tenant's info: contact, tenant (company/website) and address. */
    private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
        var slime = new Slime();
        var root = slime.setObject();
        var info = cloudTenant.info();
        if (!info.isEmpty()) {
            var contact = root.setObject("contact");
            contact.setString("name", info.contact().name());
            contact.setString("email", info.contact().email().getEmailAddress());
            contact.setBool("emailVerified", info.contact().email().isVerified());
            var tenant = root.setObject("tenant");
            tenant.setString("company", info.name());
            tenant.setString("website", info.website());
            toSlime(info.address(), root);
        }
        return new SlimeJsonResponse(slime);
    }

    /**
     * Applies the handler to the named tenant and the parsed request body; 404 when missing.
     * NOTE(review): unlike the Function overload above, this does not filter on Tenant.Type.cloud
     * before the (CloudTenant) cast — confirm all callers only reach this for cloud tenants.
     */
    private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
        return controller.tenants().get(tenantName)
                .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /**
     * Updates the "profile" part of a cloud tenant's info from the request body, merging with existing
     * values. A changed contact email triggers a verification mail and is stored unverified.
     */
    private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
        var info = cloudTenant.info();
        var mergedEmail = optional("email", inspector.field("contact"))
                .filter(address -> !address.equals(info.contact().email().getEmailAddress()))
                .map(address -> {
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); // tail of putTenantInfoProfile: new address must be re-verified
                    return new Email(address, false);
                })
                .orElse(info.contact().email());
        var mergedContact = TenantContact.empty()
                .withName(getString(inspector.field("contact").field("name"), info.contact().name()))
                .withEmail(mergedEmail);
        var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
        var mergedInfo = info
                .withName(getString(inspector.field("tenant").field("company"), info.name()))
                .withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
                .withContact(mergedContact)
                .withAddress(mergedAddress);
        validateMergedTenantInfo(mergedInfo);
        // Persist under the tenant lock to avoid losing concurrent updates.
        controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }

    /** Serializes the billing contact (name, email, phone, address) of a cloud tenant's info. */
    private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
        var slime = new Slime();
        var root = slime.setObject();
        var info = cloudTenant.info();
        if (!info.isEmpty()) {
            var billingContact = info.billingContact();
            var contact = root.setObject("contact");
            contact.setString("name", billingContact.contact().name());
            contact.setString("email", billingContact.contact().email().getEmailAddress());
            contact.setString("phone", billingContact.contact().phone());
            toSlime(billingContact.address(), root);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Updates a cloud tenant's billing contact and address from the request body, merging with existing values. */
    private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
        var info = cloudTenant.info();
        var contact = info.billingContact().contact();
        var address = info.billingContact().address();
        // isBillingContact=true: billing email updates do not go through mail verification (see updateTenantInfoContact).
        var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false);
        var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address());
        var mergedBilling = info.billingContact()
                .withContact(mergedContact)
                .withAddress(mergedAddress);
        var mergedInfo = info.withBilling(mergedBilling);
        // NOTE(review): isBillingContact is passed as false above despite this being the billing contact — confirm intent.
        controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }

    /** Serializes the contacts list of a cloud tenant's info. */
    private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
        var slime = new Slime();
        var root = slime.setObject();
        toSlime(cloudTenant.info().contacts(), root);
        return new SlimeJsonResponse(slime);
    }

    /** Replaces a cloud tenant's contacts list with the merged result of the request body and existing contacts. */
    private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
        var mergedInfo = cloudTenant.info()
                .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts()));
        controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }

    /**
     * Validates merged tenant info before it is stored: contact name and email must be non-blank,
     * the email must contain '@', and a non-blank website must parse as a URL.
     * @throws IllegalArgumentException when a constraint is violated
     */
    private void validateMergedTenantInfo(TenantInfo mergedInfo) {
        if (mergedInfo.contact().name().isBlank()) {
            throw new IllegalArgumentException("'contactName' cannot be empty");
        }
        if (mergedInfo.contact().email().getEmailAddress().isBlank()) {
            throw new IllegalArgumentException("'contactEmail' cannot be empty");
        }
        if (! mergedInfo.contact().email().getEmailAddress().contains("@")) {
            throw new IllegalArgumentException("'contactEmail' needs to be an email address");
        }
        if (! mergedInfo.website().isBlank()) {
            try {
                new URL(mergedInfo.website());
            } catch (MalformedURLException e) {
                throw new IllegalArgumentException("'website' needs to be a valid address");
            }
        }
    }

    /** Serializes a tenant address under an "address" object; writes nothing when the address is empty. */
    private void toSlime(TenantAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.address());
        addressCursor.setString("postalCodeOrZip", address.code());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.region());
        addressCursor.setString("country", address.country());
    }

    /** Serializes a billing contact under a "billingContact" object; writes nothing when empty. */
    private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("billingContact");
        addressCursor.setString("name", billingContact.contact().name());
        addressCursor.setString("email", billingContact.contact().email().getEmailAddress());
        addressCursor.setString("phone", billingContact.contact().phone());
        toSlime(billingContact.address(), addressCursor);
    }

    /** Serializes all contacts into a "contacts" array. Only EMAIL contacts are supported. */
    private void toSlime(TenantContacts contacts, Cursor parentCursor) {
        Cursor contactsCursor = parentCursor.setArray("contacts");
        contacts.all().forEach(contact -> {
            Cursor contactCursor = contactsCursor.addObject();
            Cursor audiencesArray = contactCursor.setArray("audiences");
            contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
            switch (contact.type()) {
                case EMAIL:
                    var email = (TenantContacts.EmailContact) contact;
                    contactCursor.setString("email", email.email().getEmailAddress());
                    contactCursor.setBool("emailVerified", email.email().isVerified());
                    return;
                default:
                    throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
            }
        });
    }

    /** Maps the wire value ("tenant"/"notifications") to the corresponding Audience enum constant. */
    private static TenantContacts.Audience fromAudience(String value) {
        return switch (value) {
            case "tenant": yield TenantContacts.Audience.TENANT;
            case "notifications": yield
TenantContacts.Audience.NOTIFICATIONS; // tail of fromAudience: result for the "notifications" case
            default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
        };
    }

    /** Maps an Audience enum constant back to its wire value; inverse of fromAudience. */
    private static String toAudience(TenantContacts.Audience audience) {
        return switch (audience) {
            case TENANT: yield "tenant";
            case NOTIFICATIONS: yield "notifications";
        };
    }

    /** Updates tenant info for a cloud tenant by name; 404 for unknown or non-cloud tenants. */
    private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /**
     * Returns the trimmed field value if present, otherwise the default; rejects values over 512 chars.
     * NOTE(review): parameter name "defaultVale" looks like a typo for "defaultValue" (internal name, harmless).
     */
    private String getString(Inspector field, String defaultVale) {
        var string = field.valid() ? field.asString().trim() : defaultVale;
        if (string.length() > 512)
            throw new IllegalArgumentException("Input value too long");
        return string;
    }

    /**
     * Merges the full tenant-info request body with the tenant's existing info, validates the result,
     * and stores it under the tenant lock. A changed contact email triggers a verification mail and is
     * stored unverified until confirmed.
     */
    private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
        TenantInfo oldInfo = tenant.info();
        Inspector insp = toSlime(request.getData()).get();
        var mergedEmail = optional("contactEmail", insp)
                .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress()))
                .map(address -> {
                    controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT);
                    return new Email(address, false);
                })
                .orElse(oldInfo.contact().email());
        TenantContact mergedContact = TenantContact.empty()
                .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
                .withEmail(mergedEmail);
        TenantInfo mergedInfo = TenantInfo.empty()
                .withName(getString(insp.field("name"), oldInfo.name()))
                .withEmail(getString(insp.field("email"), oldInfo.email()))
                .withWebsite(getString(insp.field("website"), oldInfo.website()))
                .withContact(mergedContact)
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
                .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), tenant.name(), oldInfo.billingContact()))
                .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts()));
        validateMergedTenantInfo(mergedInfo);
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }

    /**
     * Merges an address from the request body with the existing one. The result must be either fully
     * blank or fully populated — partially filled addresses are rejected.
     */
    private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
        if (!insp.valid()) return oldAddress;
        TenantAddress address = TenantAddress.empty()
                .withCountry(getString(insp.field("country"), oldAddress.country()))
                .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
                .withCity(getString(insp.field("city"), oldAddress.city()))
                .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
                .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
        List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region());
        if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
            return address;
        throw new IllegalArgumentException("All address fields must be set");
    }

    /**
     * Merges a contact from the request body with the existing contact. A changed non-billing email
     * triggers a verification mail and is stored unverified; billing emails are stored verified directly.
     */
    private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) {
        if (!insp.valid()) return oldContact;
        var mergedEmail = optional("email", insp)
                .filter(address -> !address.equals(oldContact.email().getEmailAddress()))
                .map(address -> {
                    if (isBillingContact) return new Email(address, true);
                    controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT);
                    return new Email(address, false);
                })
                .orElse(oldContact.email());
        return TenantContact.empty()
                .withName(getString(insp.field("name"), oldContact.name()))
                .withEmail(mergedEmail)
                .withPhone(getString(insp.field("phone"), oldContact.phone()));
    }

    /** Merges the billing contact and its address from the request body with the existing billing info. */
    private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) {
        if (!insp.valid()) return oldContact;
        return TenantBilling.empty()
                .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    }

    /**
     * Builds the new contacts list from the request body. Emails already present among the old contacts
     * keep their verification status; new emails get a verification mail and start unverified.
     */
    private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) {
        if (!insp.valid()) return oldContacts;
        List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
            String email = inspector.field("email").asString().trim();
            List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                    .map(audience -> fromAudience(audience.asString()))
                    .toList();
            return oldContacts.ofType(TenantContacts.EmailContact.class)
                    .stream()
                    .filter(contact -> contact.email().getEmailAddress().equals(email))
                    .findAny()
                    .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email()))
                    .orElseGet(() -> {
                        controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS);
                        return new TenantContacts.EmailContact(audiences, new Email(email, false));
                    });
        }).toList();
        return new TenantContacts(contacts);
    }

    /**
     * Lists notifications, either for a single tenant or for all tenants with notifications, filtered
     * by the optional request properties matched in the filter below.
     */
    private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
        boolean productionOnly = showOnlyProductionInstances(request);
        boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
        Slime slime = new Slime();
        Cursor notificationsArray = slime.setObject().setArray("notifications");
        tenant.map(t -> Stream.of(TenantName.from(t)))
                .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
                .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
                .filter(notification -> propertyEquals(request,
"application", ApplicationName::from, notification.source().application())
                                    && propertyEquals(request, "instance", InstanceName::from, notification.source().instance())
                                    && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId())
                                    && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType())
                                    && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type()))
                                    && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
            .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
    return new SlimeJsonResponse(slime);
}

/**
 * Returns true if the request parameter {@code property} is unset, or if it maps
 * (via {@code mapper}) to a value equal to the present {@code value}.
 */
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    return Optional.ofNullable(request.getProperty(property))
            .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get()))
            .orElse(true); // parameter absent: no filtering on this property
}

/** Serializes a notification, including its source coordinates, to the given cursor. */
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
    cursor.setLong("at", notification.at().toEpochMilli());
    cursor.setString("level", notificationLevelAsString(notification.level()));
    cursor.setString("type", notificationTypeAsString(notification.type()));
    if (!excludeMessages) {
        Cursor messagesArray = cursor.setArray("messages");
        notification.messages().forEach(messagesArray::addString);
    }
    if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
    notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
    notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
    notification.source().zoneId().ifPresent(zoneId -> {
        cursor.setString("environment", zoneId.environment().value());
        cursor.setString("region",
zoneId.region().value());
    });
    notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
    notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
    notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}

/** Maps a notification type to its wire name; "submission" and "applicationPackage" share one name. */
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage: yield "applicationPackage";
        case testPackage: yield "testPackage";
        case deployment: yield "deployment";
        case feedBlock: yield "feedBlock";
        case reindex: yield "reindex";
    };
}

/** Maps a notification level to its wire name. */
private static String notificationLevelAsString(Notification.Level level) {
    return switch (level) {
        case info: yield "info";
        case warning: yield "warning";
        case error: yield "error";
    };
}

/**
 * Lists either all applications of the tenant, or just the named one, with links
 * to each application and its (optionally production-only) instances.
 */
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    getTenantOrThrow(tenantName); // fails if the tenant does not exist
    List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
            controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                    .map(List::of)
                    .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));

    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (Application application : applications) {
        Cursor applicationObject = applicationArray.addObject();
        applicationObject.setString("tenant", application.id().tenant().value());
        applicationObject.setString("application", application.id().application().value());
        applicationObject.setString("url", withPath("/application/v4" +
                                                    "/tenant/" + application.id().tenant().value() +
                                                    "/application/" + application.id().application().value(),
                                                    request.getUri()).toString());
        Cursor instanceArray = applicationObject.setArray("instances");
        for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                          : application.instances().keySet()) {
            Cursor instanceObject = instanceArray.addObject();
            instanceObject.setString("instance", instance.value());
            instanceObject.setString("url", withPath("/application/v4" +
                                                     "/tenant/" + application.id().tenant().value() +
                                                     "/application/" + application.id().application().value() +
                                                     "/instance/" + instance.value(),
                                                     request.getUri()).toString());
        }
    }
    return new SlimeJsonResponse(slime);
}

/** Returns the application package used by the last run of the given dev/perf job, as a zip. */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // NOTE(review): .get() assumes a run exists for this job — confirm callers guarantee this
    RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "."
+ zone.value() + ".zip", applicationPackage);
}

/** Returns the stored diff of the application package used by the given dev run. */
private HttpResponse devApplicationPackageDiff(RunId runId) {
    DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
            .map(ByteArrayResponse::new)
            .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
}

/**
 * Returns an application (or tester) package as a zip. The build is chosen from the
 * "build" request parameter: a positive build number, "latestDeployed" for the latest
 * revision deployed to production, or, when absent, the latest submitted revision.
 */
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    final long build;
    String requestedBuild = request.getProperty("build");
    if (requestedBuild != null) {
        if (requestedBuild.equals("latestDeployed")) {
            build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                    .map(RevisionId::number)
                    .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
        } else {
            try {
                // NOTE(review): re-reads the "build" property instead of reusing requestedBuild — same value, just redundant
                build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
            }
        }
    } else {
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                .map(version -> version.id().number())
                .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}

/** Returns the stored diff of the given submitted build against the previous one. */
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
            .map(ByteArrayResponse::new)
            .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}

/** Returns the serialized form of the given application. */
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}

/**
 * Returns the Vespa version the application should compile against, optionally
 * restricted to the major version given by {@code allowMajorParam}.
 *
 * @throws IllegalArgumentException if {@code allowMajorParam} is not an integer
 */
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    Slime slime = new Slime();
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}

/** Returns the serialized form of the given instance, with its deployment status. */
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new
Slime();
    toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(slime);
}

/** Registers the PEM public key in the request body as a developer key for the requesting user (cloud tenants only). */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}

/**
 * Asks the config server of the given zone to validate that the named secret store
 * works for the given deployment, and relays the config server's answer.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    if (!applicationId.tenant().equals(TenantName.from(tenantName)))
        return ErrorResponse.badRequest("Invalid application id"); // application must belong to the tenant in the path

    var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
    var deploymentId = new DeploymentId(applicationId, zoneId);

    var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);

    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();

    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");

    var
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        // Wrap the config server's JSON answer in {"target": ..., "result": {...}}
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        return ErrorResponses.logThrowing(request, log, e);
    }
}

/** Removes the given developer key from the tenant (cloud tenants only), returning the remaining keys. */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}

/** Serializes each key/principal pair as an object holding the PEM key and the user name. */
private void toSlime(Cursor keysArray, Map<PublicKey, ?
extends Principal> keys) {
    keys.forEach((key, principal) -> {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(key));
        keyObject.setString("user", principal.getName());
    });
}

/** Adds the PEM public key in the request body as a deploy key for the application, returning all deploy keys. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                .map(KeyUtils::toPem)
                .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

/** Removes the given deploy key from the application, returning the remaining deploy keys. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                .map(KeyUtils::toPem)
                .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

/**
 * Registers a named AWS secret store for the tenant (cloud tenants only): creates the
 * tenant policy, registers the store with the secret service, and persists it on the tenant.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role",
data).asString();

    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);

    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }

    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    // Persist the new store under the tenant lock, then re-read to serialize the stored state
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

/**
 * Deletes the named secret store from the tenant: removes it from the secret service,
 * deletes the tenant policy, and removes it from the stored tenant.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);

    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();

    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");

    var tenantSecretStore = optionalSecretStore.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant =
lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    // Re-read to serialize the stored state
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

/** Sets the AWS IAM role with archive access for the tenant (cloud tenants only). */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var role = mandatory("role", data).asString();
    if (role.isBlank()) {
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}

/** Clears the tenant's AWS archive access role (cloud tenants only). */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}

/** Sets the GCP member with archive access for the tenant (cloud tenants only). */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" +
tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}

/** Clears the tenant's GCP archive access member (cloud tenants only). */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}

/**
 * Patches application-level settings from the request body: "majorVersion"
 * (a value of 0 clears the pin) and "pemDeployKey" (added to the existing keys).
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ?
"empty" : majorVersion));
        }

        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }

        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}

/** Returns the application with the given id, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(applicationId)
            .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Returns the instance with the given id, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(applicationId)
            .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Lists the nodes allocated to the given deployment, as reported by the zone's node repository. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        // "retired" covers both already-retired and retirement-requested nodes
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}

/** Serializes the autoscaling state (min/max/current/target/suggested resources and scaling events) of each cluster in the deployment. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        if ( !
cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        toSlime(cluster.target(), clusterObject.setObject("target"));
        toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
    }
    return new SlimeJsonResponse(slime);
}

/** Maps a node state to its wire name. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed: yield "failed";
        case parked: yield "parked";
        case dirty: yield "dirty";
        case ready: yield "ready";
        case active: yield "active";
        case inactive: yield "inactive";
        case reserved: yield "reserved";
        case provisioned: yield "provisioned";
        case breakfixed: yield "breakfixed";
        case deprovisioned: yield "deprovisioned";
        // default guards against states added to the enum later
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}

/** Maps an orchestration state to its wire name; unknown falls through to "unknown". */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: return "expectedUp";
        case allowedDown: return "allowedDown";
        case permanentlyDown: return "permanentlyDown";
        case unorchestrated: return "unorchestrated";
        case unknown: break;
    }
    return "unknown";
}

/** Maps a cluster type to its wire name; unknown is rejected. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin: yield "admin";
        case content: yield "content";
        case container: yield "container";
        case combined: yield "combined";
        case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}

/** Maps a disk speed to its wire name. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast : yield "fast";
        case slow : yield "slow";
        case any : yield "any";
    };
}

/** Maps a storage type to its wire name. */
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote : yield "remote";
        case local : yield "local";
        case any : yield "any";
    };
}

/** Streams the deployment's logs, filtered by the given query parameters, directly to the client. */
private HttpResponse logs(String tenantName, String
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            try (logStream) { // close the upstream stream when rendering completes
                logStream.transferTo(outputStream);
            }
        }

        @Override
        public long maxPendingBytes() {
            return 1 << 26; // 64 MiB buffered before back-pressure kicks in
        }
    };
}

/** Returns the current support access state for the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}

/** Grants support access to the deployment for 7 days, recorded against the requesting user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}

/** Revokes support access for the deployment and re-triggers it (or queues a re-trigger). */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new
DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}

/** Returns per-search-node (proton) metrics for the given deployment. */
private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment);
    return buildResponseFromSearchNodeMetrics(searchNodeMetrics);
}

/**
 * Lists scaling events per cluster for the deployment, within the window given by
 * the "from"/"until" epoch-second request parameters (defaulting to the epoch and now).
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var from = Optional.ofNullable(request.getProperty("from"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.EPOCH);
    var until = Optional.ofNullable(request.getProperty("until"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.now(controller.clock()));

    var application = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    var deployment = new DeploymentId(application, zone);
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    var slime = new Slime();
    var root = slime.setObject();
    for (var entry : events.entrySet()) {
        var serviceRoot = root.setArray(entry.getKey().clusterId().value());
scalingEventsToSlime(entry.getValue(), serviceRoot);
    }
    return new SlimeJsonResponse(slime);
}

/** Wraps the given proton metrics in a {"metrics": [...]} JSON response; 500 on serialization failure. */
private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
    try {
        var jsonObject = jsonMapper.createObjectNode();
        var jsonArray = jsonMapper.createArrayNode();
        for (SearchNodeMetrics metrics : searchnodeMetrics) {
            jsonArray.add(metrics.toJson());
        }
        jsonObject.set("metrics", jsonArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}

/**
 * Triggers (or re-triggers) the given job, honouring the request body flags
 * "skipTests", "reTrigger", "skipRevision" and "skipUpgrade".
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    String triggered = reTrigger
            ? controller.applications().deploymentTrigger()
                    .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
            : controller.applications().deploymentTrigger()
                    .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                    .stream().map(job -> job.type().jobName()).collect(joining(", "));

    // Describes which of revision/platform upgrades were suppressed, for the response message
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ?
"Job " + type.jobName() + " for " + id + " not triggered"
                               : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}

/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}

/** Resumes a previously paused job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}

/**
 * Re-sends a pending verification mail for the given address and mail type
 * ("contact" or "notifications"); 404 if no verification is pending.
 *
 * @throws IllegalArgumentException for an unknown "mailType" value
 */
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
    var mail = mandatory("mail", inspector).asString();
    var type = mandatory("mailType", inspector).asString();

    var mailType = switch (type) {
        case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT;
        case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown mail type " + type);
    };

    var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType);
    return pendingVerification.isPresent() ?
               new MessageResponse("Re-sent verification mail to " + mail)
               : ErrorResponse.notFoundError("No pending mail verification found for " + mail);
    }

    // Serializes an application overview — deployment status, versions, instances, metrics,
    // activity and ownership info — to the given Slime cursor.
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("tenant", application.id().tenant().value());
        object.setString("application", application.id().application().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/job/", request.getUri()).toString());

        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
        application.projectId().ifPresent(id -> object.setLong("projectId", id));

        // Current and outstanding change, taken from the first instance, if any.
        application.instances().values().stream().findFirst().ifPresent(instance -> {
            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        });

        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

        Cursor instancesArray = object.setArray("instances");
        for (Instance instance : showOnlyProductionInstances(request) ?
application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( ! 
status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && ! 
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } } private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); } private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! 
                 instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

            // Declared change blockers (version/revision freezes) for this instance.
            Cursor changeBlockers = object.setArray("changeBlockers");
            application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }

        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

        addRotationId(object, instance);

        // Deployments are ordered by the deployment spec when the instance is declared there.
        List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                                  .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                  .orElse(List.copyOf(instance.deployments().values()));

        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();

            if (deployment.zone().environment() == Environment.prod) {
                if (instance.rotations().size() == 1) {
                    toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject);
                }
                if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                    toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
                }
            }

            if (recurseOverDeployments(request)) // full deployment info when recursive
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                deploymentObject.setString("instance", instance.id().instance().value());
                addAvailabilityZone(deploymentObject, deployment.zone());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value(),
                                                           request.getUri()).toString());
            }
        }

        // Also list zones where a deployment is expected (declared prod jobs) or in progress
        // (active manual deployments) but no deployment exists yet.
        Stream.concat(status.jobSteps().keySet().stream()
                            .filter(job -> job.application().instance().equals(instance.name()))
                            .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                      controller.jobController().active(instance.id()).stream()
                                .map(run -> run.id().job())
                                .filter(job -> job.type().environment().isManuallyDeployed()))
              .map(job -> job.type().zone())
              .filter(zone -> ! instance.deployments().containsKey(zone))
              .forEach(zone -> {
                  Cursor deploymentObject = instancesArray.addObject();
                  deploymentObject.setString("environment", zone.environment().value());
                  deploymentObject.setString("region", zone.region().value());
              });

        // "pemDeployKey" (singular) is kept alongside "pemDeployKeys" — presumably for
        // backwards compatibility; TODO(review) confirm against API consumers.
        application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    // Returns detailed information about a single deployment; 404 if the instance is not
    // deployed in the given zone.
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment,
                                    String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().getInstance(id)
                                      .orElseThrow(() -> new NotExistsException(id + " not found"));

        DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
        Deployment deployment = instance.deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    // Serializes a change (platform version and/or application revision) to the given cursor.
    private void toSlime(Cursor object, Change change, Application application) {
        change.platform().ifPresent(version -> object.setString("version", version.toString()));
        change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                                      application.revisions().get(revision)));
    }

    // Serializes a single endpoint to the given cursor.
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
        object.setBool("legacy", endpoint.legacy());
    }

    // Serializes full deployment details (endpoints, versions, status, quota, metrics, …)
    // to the given cursor.
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        addAvailabilityZone(response, deployment.zone());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

        boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
        var endpointArray = response.setArray("endpoints");
        EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone);
        if (!legacyEndpoints) { // exclude legacy endpoints unless explicitly requested
            zoneEndpoints =
                            zoneEndpoints.not().legacy().direct();
        }
        for (var endpoint : zoneEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }
        EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                                   .targets(deploymentId);
        if (!legacyEndpoints) { // exclude legacy endpoints unless explicitly requested
            declaredEndpoints = declaredEndpoints.not().legacy().direct();
        }
        for (var endpoint : declaredEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }

        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/",
                                                     "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(),
                                                     request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", application.revisions().get(deployment.revision()).stringId());
        response.setLong("build", deployment.revision().number());

        Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
        response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
        // Expiry is only reported for zones with a deployment TTL configured.
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));

        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

        // Enclave (tenant cloud-account) info, when a cloud account is decided for this deployment.
        controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
            Cursor enclave = response.setObject("enclave");
            enclave.setString("cloudAccount", cloudAccount.value());
            controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain ->
                enclave.setString("athensDomain", domain.value()));
        });

        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

            if (!deployment.zone().environment().isManuallyDeployed()) {
                // Job-driven deployment: report status from the deployment orchestration.
                DeploymentStatus status = controller.jobController().deploymentStatus(application);
                JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
                Optional.ofNullable(status.jobSteps().get(jobId))
                        .ifPresent(stepStatus -> {
                            JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                            if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                                response.setString("status", "complete");
                            else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant()))
                                response.setString("status", "pending");
                            else
                                response.setString("status", "running");
                        });
            } else {
                // Manual deployment: report status from the last deployment run.
                var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
                deploymentRun.ifPresent(run -> {
                    response.setString("status", run.hasEnded() ? "complete" : "running");
                });
            }
        }

        response.setDouble("quota", deployment.quota().rate());
        deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));

        // Archive URI is keyed by cloud account for enclave deployments, by tenant otherwise.
        (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ?
         controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) :
         controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
                .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }

    // Serializes the BCP rotation state to the given cursor.
    private void toSlime(RotationState state, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        bcpStatus.setString("rotationStatus", rotationStateString(state));
    }

    // Serializes the endpoint status of each assigned rotation, for the given deployment.
    private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
        var array = object.setArray("endpointStatus");
        for (var rotation : rotations) {
            var statusObject = array.addObject();
            var targets = status.of(rotation.rotationId());
            statusObject.setString("endpointId", rotation.endpointId().id());
            statusObject.setString("rotationId", rotation.rotationId().asString());
            statusObject.setString("clusterId", rotation.clusterId().value());
            statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
            statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
        }
    }

    // Returns the monitoring-system URI for the given deployment.
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }

    // Sets the deployment's global-routing status to in/out of service; 404 if not deployed
    // in the given zone. The acting agent is "operator" for operator requests, "tenant" otherwise.
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName,
                                                   String environment, String region, boolean inService, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        ZoneId zone = requireZone(environment, region);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
        RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
        RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
        controller.routing().of(deploymentId).setRoutingStatus(status, agent);
        return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                               instance.id().toShortString(), zone, inService ?
                                               "in" : "out of"));
    }

    // Maps the deployment's cloud to its private-endpoint service type.
    private String serviceTypeIn(DeploymentId id) {
        CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName();
        if (CloudName.AWS.equals(cloud)) return "aws-private-link";
        if (CloudName.GCP.equals(cloud)) return "gcp-service-connect";
        return "unknown";
    }

    // Lists private-endpoint services (per load balancer cluster), their allowed URNs
    // and current endpoint connections, for the given deployment.
    private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                           ZoneId.from(environment, region));
        List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId());
        Slime slime = new Slime();
        Cursor lbArray = slime.setObject().setArray("privateServices");
        for (LoadBalancer lb : lbs) {
            Cursor serviceObject = lbArray.addObject();
            serviceObject.setString("cluster", lb.cluster().value());
            lb.service().ifPresent(service -> {
                serviceObject.setString("serviceId", service.id());
                serviceObject.setString("type", serviceTypeIn(id));
                Cursor urnsArray = serviceObject.setArray("allowedUrns");
                for (AllowedUrn urn : service.allowedUrns()) {
                    Cursor urnObject = urnsArray.addObject();
                    urnObject.setString("type", switch (urn.type()) {
                        case awsPrivateLink -> "aws-private-link";
                        case gcpServiceConnect -> "gcp-service-connect";
                    });
                    urnObject.setString("urn", urn.urn());
                }
                Cursor endpointsArray = serviceObject.setArray("endpoints");
                controller.serviceRegistry().vpcEndpointService()
                          .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount())
                          .forEach(endpoint -> {
                              Cursor endpointObject = endpointsArray.addObject();
                              endpointObject.setString("endpointId", endpoint.endpointId());
                              endpointObject.setString("state", endpoint.stateValue().name());
                              endpointObject.setString("detail", endpoint.stateString());
                          });
            });
        }
        return new SlimeJsonResponse(slime);
    }

    // Returns the current global-routing override status for the deployment's primary rotation endpoint.
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String
                                                   instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        Slime slime = new Slime();
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                       .requiresRotation()
                                                       .primary();
        if (primaryEndpoint.isPresent()) {
            DeploymentRoutingContext context = controller.routing().of(deploymentId);
            RoutingStatus status = context.routingStatus();
            array.addString(primaryEndpoint.get().upstreamName(deploymentId));
            Cursor statusObject = array.addObject();
            statusObject.setString("status", status.value().name());
            statusObject.setString("reason", "");
            statusObject.setString("agent", status.agent().name());
            statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
        }
        return new SlimeJsonResponse(slime);
    }

    // Returns the rotation status for the given (or the only) endpoint of a deployment;
    // 404 if the instance is not deployed in the given zone.
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().requireInstance(applicationId);
        ZoneId zone = requireZone(environment, region);
        RotationId rotation = findRotationId(instance, endpointId);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }

        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(instance.rotationStatus().of(rotation, deployment), response);
        return new SlimeJsonResponse(slime);
    }

    // Returns the change (platform/application) currently being deployed for the instance, if any.
    private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        Slime slime =
                      new Slime();
        Cursor root = slime.setObject();
        if ( ! instance.change().isEmpty()) {
            instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
            instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
            // "pinned" duplicates "platform-pinned" — presumably kept for backwards
            // compatibility with older API consumers; TODO(review) confirm.
            root.setBool("pinned", instance.change().isPlatformPinned());
            root.setBool("platform-pinned", instance.change().isPlatformPinned());
            root.setBool("application-pinned", instance.change().isRevisionPinned());
        }
        return new SlimeJsonResponse(slime);
    }

    // Returns whether the given deployment is (orchestrator-)suspended.
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        boolean suspended = controller.applications().isSuspended(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setBool("suspended", suspended);
        return new SlimeJsonResponse(slime);
    }

    // Proxies a /status page request to a service node in the given deployment.
    private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters()));
    }

    // Returns orchestrator information about the service nodes of the given deployment.
    private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
    }

    private
            HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
        // Proxies a /state/v1 request to a service node, forwarding the original query
        // parameters plus the originating URL (sans query) as "forwarded-url".
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        Query query = Query.empty().add(request.getJDiscRequest().parameters());
        query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
        return controller.serviceRegistry().configServer().getServiceNodePage(
                deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
    }

    // Serves application-package content for the given deployment at the given sub-path.
    private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
    }

    // Updates an existing tenant from the request body; 404 if the tenant does not exist.
    private HttpResponse updateTenant(String tenantName, HttpRequest request) {
        getTenantOrThrow(tenantName);
        TenantName tenant = TenantName.from(tenantName);
        Inspector requestObject = toSlime(request.getData()).get();
        controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                    accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
        return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
    }

    // Creates a new tenant from the request body.
    private HttpResponse createTenant(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Inspector requestObject = toSlime(request.getData()).get();
        controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                    accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
        if
           (controller.system().isPublic()) {
            // In public systems, pre-populate the tenant contact from the creating user.
            User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
            TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                        .info()
                                        .withContact(TenantContact.from(user.name(), new Email(user.email(), true)));
            controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
                lockedTenant = lockedTenant.withInfo(info);
                controller.tenants().store(lockedTenant);
            });
        }
        return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
    }

    // Creates a new application for the given tenant.
    private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
        Application application = controller.applications().createApplication(id, credentials);
        Slime slime = new Slime();
        toSlime(id, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    // Creates a new instance, creating the application first if it does not exist yet.
    private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        if (controller.applications().getApplication(applicationId).isEmpty())
            createApplication(tenantName, applicationName, request);

        controller.applications().createInstance(applicationId.instance(instanceName));

        Slime slime = new Slime();
        toSlime(applicationId.instance(instanceName), slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
     */
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // An empty version means "current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            // Only operators may force deployment of versions not active in this system.
            if ( ! versionStatus.isActive(version) && ! isOperator(request))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPlatformPin();

            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Trigger deployment to the last known application package for the given application. */
    private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Inspector buildField = toSlime(request.getData()).get().field("build");
        long build = buildField.valid() ? buildField.asLong() : -1; // -1 means "latest"
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            RevisionId revision = build == -1 ?
                                  application.get().revisions().last().get().id()
                                  : getRevision(application.get(), build);
            Change change = Change.of(revision);
            if (pin)
                change = change.withRevisionPin();

            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    // Resolves the revision with the given build number; the build must also still exist
    // in the application store. Throws IllegalArgumentException otherwise.
    private RevisionId getRevision(Application application, long build) {
        return application.revisions().withPackage().stream()
                          .map(ApplicationVersion::id)
                          .filter(version -> version.number() == build)
                          .findFirst()
                          .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                                   application.id().application(),
                                                                                                   build))
                          .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
    }

    // Marks the given build as skipped (non-deployable), and cancels any instance change
    // currently targeting that revision.
    private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
        controller.applications().lockApplicationOrThrow(id, application -> {
            controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
            for (Instance instance : application.get().instances().values())
                if (instance.change().revision().equals(Optional.of(revision)))
                    controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        });
        return new MessageResponse("Marked build '" + build + "' as non-deployable");
    }

    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */

    /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents.
*/ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName()); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone.
 * The response lists, per cluster, the pending document types (with the config generation they require) and the ready types (with their reindexing status).
*/ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); } void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress
// (setStatus, continued) The remaining status fields are optional and only serialized when present.
-> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); status.cause().ifPresent(cause -> statusObject.setString("cause", cause)); } private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; } /** Enables reindexing of an application in a zone. */ private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); } /** Disables reindexing of an application in a zone. */ private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().disableReindexing(id, zone); return new MessageResponse("Disabled reindexing of " + id + " in " + zone); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); RestartFilter restartFilter = new RestartFilter() .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of)) .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
// Optional query parameters narrow the restart to a single host, cluster type and/or cluster id.
.withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); controller.applications().restart(deploymentId, restartFilter); return new MessageResponse("Requested restart of " + deploymentId); } /** Set suspension status of the given deployment. */ private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().setSuspension(deploymentId, suspend); return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId); } private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { if ( ! type.environment().isManuallyDeployed() && ! isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( !
// (jobDeploy, continued) The multipart request must carry the application package; 'deployOptions' (vespaVersion, dryRun) is optional.
dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request)); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( !
// (deploySystemApplication, continued) 'deployOptions' is mandatory; system applications may not specify a version and may not deploy during a system upgrade.
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if ( ! vespaVersion.isEmpty()) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } DeploymentResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber()); Cursor logArray = root.setArray("prepareMessages"); for (LogEntry logMessage : result.log()) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.epochMillis()); logObject.setString("level", logMessage.level().getName()); logObject.setString("message", logMessage.message()); } return new SlimeJsonResponse(slime); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && !
// (deleteTenant, continued) 'forget' permanently erases the tenant and is restricted to operators.
isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> !
// (deactivate, continued) Also abort any still-running deployment job targeting the deactivated zone.
run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( !
// (testConfig, continued) Non-production jobs also include the zone they themselves deploy to.
type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime
// (requestServiceDump, continued) Build the dump-request report and store it on the node via the node repository.
dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final
// (waitForServiceDumpResult, continued) Effectively-final copy so the logging lambda below can capture the report.
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName());
// (toSlime for tenant, continued) Athenz tenants expose property and contact info; cloud tenants expose developer keys, secret stores, quota and archive access.
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ?
// With 'production=true' only production instances are listed; 'activeInstances=true' additionally drops instances without deployments.
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(),
// (toSlime for autoscaling, continued) Serialize the peak and ideal load snapshots.
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    // Copy scheme/user-info/host/port from the given URI; fragment is dropped.
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}

/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}

/** Returns the application/v4 API path of the given deployment. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}

/** Parses the given value as a long, returning the given default when the value is null. */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}

/** Reads the given stream (at most 1 MB) and parses it as JSON. */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    }
    catch (IOException e) {
        // Fix: propagate the cause instead of throwing a bare RuntimeException, which discarded it.
        throw new RuntimeException(e);
    }
}

/** Returns the user principal of the given request, or throws if there is none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null) throw new IllegalArgumentException("Expected a user principal");
    return principal;
}

/** Returns the given field of the given object, or throws if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    if ( ! object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}

/** Returns the given string field of the given object, if present. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}

/** Joins the string representations of the given elements with '/'. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}

private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value(),
                                     request.getUri()).toString());
}

private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value() +
                                     "/instance/" + id.instance().value(),
                                     request.getUri()).toString());
}

private void stringsToSlime(List<String> strings, Cursor array) {
    for (String string : strings)
        array.addString(string);
}

private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStore = object.setArray("secretStores");
    tenantSecretStores.forEach(store -> {
        toSlime(secretStore.addObject(), store);
    });
}

private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    var stores = object.setArray("accounts");
    tenantSecretStores.forEach(secretStore -> {
        toSlime(stores.addObject(), secretStore);
    });
}

private void toSlime(Cursor object, TenantSecretStore secretStore) {
    object.setString("name", secretStore.getName());
    object.setString("awsId", secretStore.getAwsId());
    object.setString("role", secretStore.getRole());
}

/** Reads the entire stream as one string, or returns null when the stream is empty. */
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream).useDelimiter("\\A");
    if ( !
// (readToString, continued) An empty stream yields null; otherwise the whole content is returned as one token.
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ?
// (submit, continued) A missing or zero project id defaults to 1.
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0],
// (removeAllProdDeployments, continued) Synthetic submission with empty metadata: its only purpose is to remove production deployments.
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( !
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e); default -> new 
ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), 
request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), 
Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return 
searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), 
path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return 
approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing 
at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return 
deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return 
// handleDELETE dispatch, continued: each matcher below maps a DELETE path to its
// handler. (The previous line ends with "return", completed by the first call.)
removeManagedAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
// "deploying" paths without an instance segment target the "default" instance.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// DELETE on /suspend passes suspend=false (the POST variant of this path passes true).
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
// NOTE(review): alternate path order (environment/region before instance) — presumably a
// legacy URL layout kept in parallel with the /instance/... forms above; confirm.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path); }

// Advertises the HTTP methods this handler supports.
private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; }

// Lists every tenant (optionally including deleted ones) with that tenant's
// applications; backs the recursive GET on the API root.
private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); List<Application> applications = controller.applications().asList(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request); return new SlimeJsonResponse(slime); }

// API root: full recursive tenant listing when requested, otherwise just a
// resource link (ternary completed on the next line).
private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ?
recursiveRoot(request) : new ResourceResponse(request, "tenant"); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request)) .map(tenant -> tenant(tenant, request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request); return new SlimeJsonResponse(slime); } private HttpResponse accessRequests(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var accessControlService = controller.serviceRegistry().accessControlService(); var slime = new Slime(); var cursor = slime.setObject(); try { var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant); var managedAccess = accessControlService.getManagedAccess(tenant); cursor.setBool("managedAccess", managedAccess); accessRoleInformation.getPendingRequest() .ifPresent(membershipRequest -> { var requestCursor = cursor.setObject("pendingRequest"); requestCursor.setString("requestTime", membershipRequest.getCreationTime()); requestCursor.setString("reason", membershipRequest.getReason()); }); var auditLogCursor = cursor.setArray("auditLog"); accessRoleInformation.getAuditLog() .forEach(auditLogEntry -> { var entryCursor = auditLogCursor.addObject(); entryCursor.setString("created", 
auditLogEntry.getCreationTime()); entryCursor.setString("approver", auditLogEntry.getApprover()); entryCursor.setString("reason", auditLogEntry.getReason()); entryCursor.setString("status", auditLogEntry.getAction()); }); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) cursor.setBool("managedAccess", false); } return new SlimeJsonResponse(slime); } private HttpResponse requestSshAccess(String tenantName, HttpRequest request) { if (!isOperator(request)) { return ErrorResponse.forbidden("Only operators are allowed to request ssh access"); } if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only request access for cloud tenants"); controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName)); return new MessageResponse("OK"); } private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var inspector = toSlime(request.getData()).get(); var expiry = inspector.field("expiry").valid() ? 
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); try { controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes"); throw e; } } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support 
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
// Tail of validateMergedTenantInfo(...): the "if (!" opening this check sits at
// the end of the previous line; a non-blank website must parse as a URL.
// NOTE(review): java.net.URL(String) is deprecated in recent JDKs — consider URI.
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } }

// Serializes a tenant address into an "address" sub-object; no-op when empty.
private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); }

// Serializes the billing contact (name/email/phone plus address); no-op when empty.
private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); }

// Serializes the contact list; only EMAIL contacts are implemented — any other
// contact type is a programming error and throws.
private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); }

// Head of fromAudience(String): maps a wire-format audience name to the enum
// (the switch expression is completed on the next line).
private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true)) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); return oldContacts.ofType(TenantContacts.EmailContact.class) .stream() .filter(contact -> contact.email().getEmailAddress().equals(email)) .findAny() .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email())) .orElseGet(() -> { controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS); return new TenantContacts.EmailContact(audiences, new Email(email, false)); }); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, 
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); // (continues the instance(...) method begun on the previous chunk line)
    toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(slime);
}

/** Adds a developer public key (PEM, from the request body) to a cloud tenant; returns the updated key list. */
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}

/**
 * Validates a named tenant secret store against a concrete deployment via the config server
 * and relays the config server's JSON result under a "result" field.
 */
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    if (!applicationId.tenant().equals(TenantName.from(tenantName)))
        return ErrorResponse.badRequest("Invalid application id");
    var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        // Config server returned something that is not valid JSON — log and report.
        return ErrorResponses.logThrowing(request, log, e);
    }
}

/** Removes a developer public key (PEM, from the request body) from a cloud tenant; returns the remaining keys. */
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // NOTE(review): 'user' is looked up but never used in the visible code — candidate for removal; verify no side effect is intended.
    Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}

/** Serializes a key-to-principal map as an array of { key, user } objects. (Signature continues on the next chunk line.) */
private void toSlime(Cursor keysArray, Map<PublicKey, ?
extends Principal> keys) { // (continues the toSlime(Cursor, Map) signature begun on the previous chunk line)
    keys.forEach((key, principal) -> {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(key));
        keyObject.setString("user", principal.getName());
    });
}

/** Adds a deploy public key (PEM, from the request body) to the application; returns the updated key set. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

/** Removes a deploy public key (PEM, from the request body) from the application; returns the remaining keys. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

/**
 * Registers a new tenant secret store: validates it, creates the tenant policy and registers the
 * store with the secret service, then persists it on the tenant. Returns the resulting store list.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the stored state.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

/** Deletes a named tenant secret store from the secret service, role policy, and tenant. (Continues on the next chunk line.) */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();
    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    var tenantSecretStore = optionalSecretStore.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant =
lockedTenant.withoutSecretStore(tenantSecretStore); // (continues deleteSecretStore begun on the previous chunk line)
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the stored state.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

/** Sets the AWS archive access role (from the request body) on a cloud tenant. */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var role = mandatory("role", data).asString();
    if (role.isBlank()) {
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}

/** Clears the AWS archive access role on a cloud tenant. */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}

/** Sets the GCP archive access member (from the request body) on a cloud tenant. */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}

/** Clears the GCP archive access member on a cloud tenant. */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}

/**
 * Patches application-level fields from the request body: "majorVersion" (0 clears it) and
 * "pemDeployKey". Returns a message listing what was changed. (Continues on the next chunk line.)
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ?
"empty" : majorVersion)); // (continues patchApplication begun on the previous chunk line)
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}

/** Looks up the application, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Looks up the instance, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Lists the node repository's nodes for this deployment as JSON. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // "restarting"/"rebooting" are derived from wanted vs. current generation counters.
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}

/** Renders per-cluster autoscaling state for this deployment as JSON. (Continues on the next chunk line.) */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        if ( !
cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize")); toSlime(cluster.current(), clusterObject.setObject("current")); toSlime(cluster.target(), clusterObject.setObject("target")); toSlime(cluster.suggested(), clusterObject.setObject("suggested")); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents")); clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis()); } return new SlimeJsonResponse(slime); } private static String valueOf(Node.State state) { return switch (state) { case failed: yield "failed"; case parked: yield "parked"; case dirty: yield "dirty"; case ready: yield "ready"; case active: yield "active"; case inactive: yield "inactive"; case reserved: yield "reserved"; case provisioned: yield "provisioned"; case breakfixed: yield "breakfixed"; case deprovisioned: yield "deprovisioned"; default: throw new IllegalArgumentException("Unexpected node state '" + state + "'."); }; } static String valueOf(Node.ServiceState state) { switch (state) { case expectedUp: return "expectedUp"; case allowedDown: return "allowedDown"; case permanentlyDown: return "permanentlyDown"; case unorchestrated: return "unorchestrated"; case unknown: break; } return "unknown"; } private static String valueOf(Node.ClusterType type) { return switch (type) { case admin: yield "admin"; case content: yield "content"; case container: yield "container"; case combined: yield "combined"; case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); }; } private static String valueOf(NodeResources.DiskSpeed diskSpeed) { return switch (diskSpeed) { case fast : yield "fast"; case slow : yield "slow"; case any : yield "any"; }; } private static String valueOf(NodeResources.StorageType storageType) { return switch (storageType) { case remote : yield "remote"; case local : yield "local"; case any : yield "any"; }; } private HttpResponse logs(String tenantName, String 
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { // (continues the logs(...) signature begun on the previous chunk line)
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    // Stream the log bytes straight through; try-with-resources closes the upstream stream.
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
        @Override
        public long maxPendingBytes() {
            return 1 << 26; // 64 MiB buffering cap for the streamed response
        }
    };
}

/** Returns the current support access state for the deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}

/** Grants support access to the deployment for 7 days, attributed to the requesting user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}

/** Revokes support access and re-triggers the deployment so the change takes effect. */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}

/** Fetches search-node (proton) metrics for the deployment and renders them as JSON. */
private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment);
    return buildResponseFromSearchNodeMetrics(searchNodeMetrics);
}

/**
 * Lists scaling events per cluster in the ["from","until"] epoch-second window
 * (defaults: epoch to now). (Continues on the next chunk line.)
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var from = Optional.ofNullable(request.getProperty("from"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.EPOCH);
    var until = Optional.ofNullable(request.getProperty("until"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.now(controller.clock()));
    var application = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    var deployment = new DeploymentId(application, zone);
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    var slime = new Slime();
    var root = slime.setObject();
    for (var entry : events.entrySet()) {
        var serviceRoot = root.setArray(entry.getKey().clusterId().value());
scalingEventsToSlime(entry.getValue(), serviceRoot); // (continues scaling(...) begun on the previous chunk line)
    }
    return new SlimeJsonResponse(slime);
}

/** Wraps a list of search-node metric blobs into a { "metrics": [...] } JSON response; 500 on serialization failure. */
private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
    try {
        var jsonObject = jsonMapper.createObjectNode();
        var jsonArray = jsonMapper.createArrayNode();
        for (SearchNodeMetrics metrics : searchnodeMetrics) {
            jsonArray.add(metrics.toJson());
        }
        jsonObject.set("metrics", jsonArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}

/**
 * Triggers (or re-triggers) the given job. Request-body flags: "skipTests", "reTrigger",
 * "skipRevision", "skipUpgrade". Returns a message naming the triggered jobs and any
 * suppressed upgrade kinds.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    String triggered = reTrigger
            ? controller.applications().deploymentTrigger()
                        .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
            : controller.applications().deploymentTrigger()
                        .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                        .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // Builds e.g. ", without revision and platform upgrade" depending on which upgrades were skipped.
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty()
            ? "Job " + type.jobName() + " for " + id + " not triggered"
            : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}

/** Pauses the given job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}

/** Resumes a previously paused job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}

/**
 * Re-sends a pending verification mail of the given "mailType" ("contact" or "notifications")
 * to the given "mail" address. (Return continues on the next chunk line.)
 */
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
    var mail = mandatory("mail", inspector).asString();
    var type = mandatory("mailType", inspector).asString();
    var mailType = switch (type) {
        case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT;
        case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown mail type " + type);
    };
    var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType);
    return pendingVerification.isPresent() ?
new MessageResponse("Re-sent verification mail to " + mail) : // (completes resendEmailVerification begun on the previous chunk line)
            ErrorResponse.notFoundError("No pending mail verification found for " + mail);
}

/**
 * Serializes an application (all its instances, metrics, activity, ownership and issue ids)
 * into the given cursor. Instances may be limited to production only via the request.
 */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/", request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change status is taken from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

/**
 * Serializes one instance (change status, change blockers, rotation, deployments) into the
 * given cursor. (Body continues on the next chunk line.)
 */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // NOTE(review): 'jobStatus' is computed but unused in the visible code — candidate for removal.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && !
instance.rotations().isEmpty()) // (continues the toSlime(...) deployments loop begun on the previous chunk line)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) // List full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            addAvailabilityZone(deploymentObject, deployment.zone());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}

/** Adds the instance's first assigned rotation id, if any, as "rotationId". */
private void addRotationId(Cursor object, Instance instance) {
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}

/**
 * Serializes a single instance in full (ids, change status, change blockers, rotations,
 * deployments — including zones with a triggered but not yet completed deployment — keys,
 * metrics, activity and issue ids) into the given cursor.
 */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): 'jobStatus' is computed but unused in the visible code — candidate for removal.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                              .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                              .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request)) // List full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            addAvailabilityZone(deploymentObject, deployment.zone());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list zones with production/manual jobs but no completed deployment yet, as bare environment/region entries.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

/** Renders a single deployment as JSON. (Method continues past the end of this chunk.) */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
// deployment() completes: throws NotExistsException when the instance has no deployment in the
// requested zone, otherwise serializes it via the detailed toSlime below. Then two small overloads:
// toSlime(Cursor, Change, Application) writes the platform "version" and application "revision" of a
// change; toSlime(Endpoint, Cursor) writes cluster/tls/url/scope/routingMethod/legacy for one
// endpoint. The large toSlime(Cursor, DeploymentId, Deployment, HttpRequest) then begins: identity
// fields, availability zone, and the zone-scoped endpoint list (legacy/direct endpoints are filtered
// out unless the "includeLegacyEndpoints" request property is set).
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); } private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); addAvailabilityZone(response, deployment.zone()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints =
// Deployment detail continues: zone and declared endpoints (same legacy/direct filter), "clusters"
// and "nodes" navigation URLs, monitoring ("yamasUrl"), platform version, revision and build number,
// deploy time plus expiry when the zone has a deployment TTL, and the optional "enclave" object for
// deployments in a tenant-owned cloud account.
zoneEndpoints.not().legacy().direct(); } for (var endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> { Cursor enclave = response.setObject("enclave"); enclave.setString("cloudAccount", cloudAccount.value()); controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain ->
// NOTE(review): "athensDomain" is the wire-format key clients parse — do not "fix" the spelling.
// Continues with rotation endpoint status for prod, then a deployment "status" field: for
// orchestrated zones it is derived from the job-step readiness (complete/pending/running); for
// manually-deployed environments it is taken from the last run (complete once ended). Finally quota
// rate, optional cost, and the archive URI — chosen by cloud account for enclave deployments, by
// tenant otherwise.
enclave.setString("athensDomain", domain.value())); }); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant())) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ?
// Deployment detail finishes with activity timestamps/rates and the deployment metrics object.
// Then: toSlime(RotationState, Cursor) writes the "bcpStatus" wrapper, and
// toSlime(List<AssignedRotation>, RotationStatus, Deployment, Cursor) begins the "endpointStatus"
// array, one entry per assigned rotation.
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) : controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString())); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); } private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString());
// Each endpointStatus entry finishes with clusterId, per-deployment rotation state, and the
// lastUpdated timestamp. monitoringSystemUri() delegates to the zone registry.
// setGlobalRotationOverride() then begins: takes a deployment in/out of global rotation, recording
// whether the agent is an operator or the tenant.
statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } DeploymentId deploymentId = new DeploymentId(instance.id(), zone); RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; controller.routing().of(deploymentId).setRoutingStatus(status, agent); return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ?
"in" : "out of")); } private String serviceTypeIn(DeploymentId id) { CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName(); if (CloudName.AWS.equals(cloud)) return "aws-private-link"; if (CloudName.GCP.equals(cloud)) return "gcp-service-connect"; return "unknown"; } private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId()); Slime slime = new Slime(); Cursor lbArray = slime.setObject().setArray("privateServices"); for (LoadBalancer lb : lbs) { Cursor serviceObject = lbArray.addObject(); serviceObject.setString("cluster", lb.cluster().value()); lb.service().ifPresent(service -> { serviceObject.setString("serviceId", service.id()); serviceObject.setString("type", serviceTypeIn(id)); Cursor urnsArray = serviceObject.setArray("allowedUrns"); for (AllowedUrn urn : service.allowedUrns()) { Cursor urnObject = urnsArray.addObject(); urnObject.setString("type", switch (urn.type()) { case awsPrivateLink -> "aws-private-link"; case gcpServiceConnect -> "gcp-service-connect"; }); urnObject.setString("urn", urn.urn()); } Cursor endpointsArray = serviceObject.setArray("endpoints"); controller.serviceRegistry().vpcEndpointService() .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount()) .forEach(endpoint -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("endpointId", endpoint.endpointId()); endpointObject.setString("state", endpoint.stateValue().name()); endpointObject.setString("detail", endpoint.stateString()); }); }); } return new SlimeJsonResponse(slime); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String 
instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); Slime slime = 
new Slime(); Cursor root = slime.setObject(); if ( ! instance.change().isEmpty()) { instance.change().platform().ifPresent(version -> root.setString("platform", version.toString())); instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString())); root.setBool("pinned", instance.change().isPlatformPinned()); root.setBool("platform-pinned", instance.change().isPlatformPinned()); root.setBool("application-pinned", instance.change().isRevisionPinned()); } return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); } private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private 
HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if 
(controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), new Email(user.email(), true))); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". 
*/ private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { String versionString = readToString(request.getData()); ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Version version = Version.fromString(versionString); VersionStatus versionStatus = controller.readVersionStatus(); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus); if ( ! versionStatus.isActive(version) && ! isOperator(request)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + versionStatus.versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPlatformPin(); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. */ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Inspector buildField = toSlime(request.getData()).get().field("build"); long build = buildField.valid() ? buildField.asLong() : -1; StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ?
// deployApplication continues: uses the last known revision when no "build" was supplied, otherwise
// resolves the requested build via getRevision(), which also requires the package to still exist in
// the application store. cancelBuild() marks a production revision as skipped and cancels any
// instance change targeting it.
application.get().revisions().last().get().id() : getRevision(application.get(), build); Change change = Change.of(revision); if (pin) change = change.withRevisionPin(); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } private RevisionId getRevision(Application application, long build) { return application.revisions().withPackage().stream() .map(ApplicationVersion::id) .filter(version -> version.number() == build) .findFirst() .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build)) .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); } private HttpResponse cancelBuild(String tenantName, String applicationName, String build){ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); RevisionId revision = RevisionId.forProduction(Long.parseLong(build)); controller.applications().lockApplicationOrThrow(id, application -> { controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))); for (Instance instance : application.get().instances().values()) if (instance.change().revision().equals(Optional.of(revision))) controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents.
*/ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName()); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone.
*/ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); } void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress
// setStatus finishes with progress/speed/cause; toString maps the reindexing state enum to its wire
// string via an (exhaustive) switch expression; enableReindexing/disableReindexing toggle reindexing
// in a zone; restart() begins building a RestartFilter from the optional hostname/clusterType
// request properties.
-> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); status.cause().ifPresent(cause -> statusObject.setString("cause", cause)); } private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; } /** Enables reindexing of an application in a zone. */ private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); } /** Disables reindexing of an application in a zone. */ private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().disableReindexing(id, zone); return new MessageResponse("Disabled reindexing of " + id + " in " + zone); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); RestartFilter restartFilter = new RestartFilter() .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of)) .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
// restart() completes the filter (optional clusterId) and requests the restart. suspend() toggles
// orchestration suspension for one deployment. jobDeploy() then begins: direct deployments are only
// allowed to manually-deployed environments unless the caller is an operator; the multipart body is
// parsed into named data parts.
.withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); controller.applications().restart(deploymentId, restartFilter); return new MessageResponse("Requested restart of " + deploymentId); } /** Set suspension status of the given deployment. */ private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().setSuspension(deploymentId, suspend); return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId); } private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { if ( ! type.environment().isManuallyDeployed() && ! isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( !
// jobDeploy continues: requires the "applicationZip" form part, verifies identity configuration,
// reads optional "vespaVersion"/"dryRun" from the "deployOptions" part, ensures the application
// exists, and starts the deployment run. NOTE(review): the containsKey check uses the literal
// "applicationZip" while the get uses EnvironmentResource.APPLICATION_ZIP — presumably the same
// value; prefer the constant in both places for consistency (confirm the constant's value first).
dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request)); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( !
// deploySystemApplication continues with its validation chain: requires "deployOptions", the target
// must be a known system application with a package, no explicit vespaVersion may be given, the
// system must not be mid-upgrade, and a system version must be determined. On success the package is
// deployed on the system version and the prepare log is returned. deleteTenant() then begins:
// "forget" is operator-only.
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if ( ! vespaVersion.isEmpty()) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } DeploymentResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber()); Cursor logArray = root.setArray("prepareMessages"); for (LogEntry logMessage : result.log()) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.epochMillis()); logObject.setString("level", logMessage.level().getName()); logObject.setString("message", logMessage.message()); } return new SlimeJsonResponse(slime); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && !
// deleteTenant/deleteApplication delete with access-control credentials; deleteInstance also deletes
// the enclosing application when it has no instances left. deactivate() begins last but is cut off
// at the end of this chunk (its body continues beyond the visible source).
isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> !
run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! 
type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime 
dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final 
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
As far as I know, we use hyphenated REST API paths, and a hyphen cannot appear in a Java enum constant name — so the path segment must be translated (hyphens to underscores, upper-cased) before the enum lookup.
/**
 * Cancels pending deployment change(s) for the given instance.
 *
 * @param choice which changes to cancel; a hyphenated REST path segment (e.g. "application-pin")
 *               mapped onto a {@link ChangesToCancel} constant (e.g. APPLICATION_PIN)
 * @return a message describing the change of deployment state, or that none was in progress
 * @throws IllegalArgumentException if {@code choice} does not name a ChangesToCancel constant
 */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Literal char replace (no regex needed) and locale-independent upper-casing:
        // with the default-locale toUpperCase(), e.g. the Turkish locale maps 'i' to 'İ'
        // and the enum lookup for "pin"/"platform-pin" would fail.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replace('-', '_').toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '")
                .append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
// Map the hyphenated REST path segment (e.g. "application-pin") onto the ChangesToCancel
// enum constant (e.g. APPLICATION_PIN) by swapping '-' for '_' and upper-casing.
// NOTE(review): toUpperCase() without an explicit Locale uses the default locale — confirm
// this never runs under a locale (e.g. Turkish) where 'i' upper-cases to a non-ASCII 'İ'.
ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase());
/**
 * Cancels pending deployment change(s) for the given instance.
 *
 * @param choice which changes to cancel; a hyphenated REST path segment (e.g. "application-pin")
 *               mapped onto a {@link ChangesToCancel} constant (e.g. APPLICATION_PIN)
 * @return a message describing the change of deployment state, or that none was in progress
 * @throws IllegalArgumentException if {@code choice} does not name a ChangesToCancel constant
 */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Literal char replace (no regex needed) and locale-independent upper-casing:
        // with the default-locale toUpperCase(), e.g. the Turkish locale maps 'i' to 'İ'
        // and the enum lookup for "pin"/"platform-pin" would fail.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replace('-', '_').toUpperCase(java.util.Locale.ROOT));
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '")
                .append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e); default -> new 
ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), 
request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), 
Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return 
searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), 
path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return 
approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing 
at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return 
deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return 
removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) 
return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); List<Application> applications = controller.applications().asList(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ? 
recursiveRoot(request) : new ResourceResponse(request, "tenant"); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request)) .map(tenant -> tenant(tenant, request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request); return new SlimeJsonResponse(slime); } private HttpResponse accessRequests(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var accessControlService = controller.serviceRegistry().accessControlService(); var slime = new Slime(); var cursor = slime.setObject(); try { var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant); var managedAccess = accessControlService.getManagedAccess(tenant); cursor.setBool("managedAccess", managedAccess); accessRoleInformation.getPendingRequest() .ifPresent(membershipRequest -> { var requestCursor = cursor.setObject("pendingRequest"); requestCursor.setString("requestTime", membershipRequest.getCreationTime()); requestCursor.setString("reason", membershipRequest.getReason()); }); var auditLogCursor = cursor.setArray("auditLog"); accessRoleInformation.getAuditLog() .forEach(auditLogEntry -> { var entryCursor = auditLogCursor.addObject(); entryCursor.setString("created", 
auditLogEntry.getCreationTime()); entryCursor.setString("approver", auditLogEntry.getApprover()); entryCursor.setString("reason", auditLogEntry.getReason()); entryCursor.setString("status", auditLogEntry.getAction()); }); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) cursor.setBool("managedAccess", false); } return new SlimeJsonResponse(slime); } private HttpResponse requestSshAccess(String tenantName, HttpRequest request) { if (!isOperator(request)) { return ErrorResponse.forbidden("Only operators are allowed to request ssh access"); } if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only request access for cloud tenants"); controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName)); return new MessageResponse("OK"); } private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var inspector = toSlime(request.getData()).get(); var expiry = inspector.field("expiry").valid() ? 
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); try { controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes"); throw e; } } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support 
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
mergedInfo.website().isBlank()) {
            try {
                new URL(mergedInfo.website()); // syntax check only; the URL object is discarded
            } catch (MalformedURLException e) {
                throw new IllegalArgumentException("'website' needs to be a valid address");
            }
        }
    }

    // Serializes a tenant address under an "address" object; no-op for an empty address.
    private void toSlime(TenantAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.address());
        addressCursor.setString("postalCodeOrZip", address.code());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.region());
        addressCursor.setString("country", address.country());
    }

    // Serializes the billing contact (and its address) under a "billingContact" object; no-op when empty.
    private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("billingContact");
        addressCursor.setString("name", billingContact.contact().name());
        addressCursor.setString("email", billingContact.contact().email().getEmailAddress());
        addressCursor.setString("phone", billingContact.contact().phone());
        toSlime(billingContact.address(), addressCursor);
    }

    // Serializes all contacts into a "contacts" array; only EMAIL contacts are supported,
    // any other type fails fast with IllegalArgumentException.
    private void toSlime(TenantContacts contacts, Cursor parentCursor) {
        Cursor contactsCursor = parentCursor.setArray("contacts");
        contacts.all().forEach(contact -> {
            Cursor contactCursor = contactsCursor.addObject();
            Cursor audiencesArray = contactCursor.setArray("audiences");
            contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
            switch (contact.type()) {
                case EMAIL:
                    var email = (TenantContacts.EmailContact) contact;
                    contactCursor.setString("email", email.email().getEmailAddress());
                    contactCursor.setBool("emailVerified", email.email().isVerified());
                    return; // returns from the forEach lambda, i.e. continues with the next contact
                default:
                    throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
            }
        });
    }

    // Maps an external audience string to the enum; throws on unknown values (see the default branch).
    private static TenantContacts.Audience fromAudience(String value) {
        return switch (value) {
            case "tenant": yield TenantContacts.Audience.TENANT;
            case "notifications": yield
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true)) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); return oldContacts.ofType(TenantContacts.EmailContact.class) .stream() .filter(contact -> contact.email().getEmailAddress().equals(email)) .findAny() .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email())) .orElseGet(() -> { controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS); return new TenantContacts.EmailContact(audiences, new Email(email, false)); }); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, 
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var 
        // (Tail of the enclosing secret-store validation handler:) ask the config server to validate
        // the store, then wrap its JSON reply in a {"target": ..., "result": ...} envelope.
        response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
        try {
            var responseRoot = new Slime();
            var responseCursor = responseRoot.setObject();
            responseCursor.setString("target", deploymentId.toString());
            var responseResultCursor = responseCursor.setObject("result");
            // Copy the config server's JSON response verbatim under "result".
            var responseSlime = SlimeUtils.jsonToSlime(response);
            SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
            return new SlimeJsonResponse(responseRoot);
        } catch (JsonParseException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    /** Removes the developer key given in the request body from the tenant; cloud tenants only. Returns the remaining keys. */
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        // NOTE(review): 'user' is never read after this lookup — appears to be dead code; confirm and remove.
        Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Serializes each (public key, principal) pair as a { "key": <pem>, "user": <name> } entry in the given array. */
    private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    /** Adds the PEM deploy key in the request body to the application, returning all its deploy keys. */
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes the PEM deploy key in the request body from the application, returning its remaining deploy keys. */
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Registers a new secret store for a cloud tenant; the request body must contain awsId, externalId and role. */
    private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        var data = toSlime(request.getData()).get();
        var awsId = mandatory("awsId", data).asString();
        var externalId = mandatory("externalId", data).asString();
        var role = mandatory("role", data).asString();

        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var tenantSecretStore = new TenantSecretStore(name, awsId, role);

        if (!tenantSecretStore.isValid()) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
        }
        if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
        }

        // Create the IAM policy and register with the secret service before persisting tenant state.
        controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
        controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });

        // Re-read so the response reflects the stored state.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }

    /** Deletes the named secret store from the tenant, its secret-service registration, and its IAM policy. */
    private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);

        var optionalSecretStore = tenant.tenantSecretStores().stream()
                .filter(secretStore -> secretStore.getName().equals(name))
                .findFirst();
        if (optionalSecretStore.isEmpty())
            return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");

        var tenantSecretStore = optionalSecretStore.get();
        controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
        controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant =
                    lockedTenant.withoutSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });

        // Re-read so the response reflects the stored state.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }

    /** Sets the AWS role allowed to read this (cloud) tenant's archive bucket. */
    private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        var data = toSlime(request.getData()).get();
        var role = mandatory("role", data).asString();
        if (role.isBlank()) {
            return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
        }
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
    }

    /** Clears the AWS archive access role for this (cloud) tenant. */
    private HttpResponse removeAwsArchiveAccess(String tenantName) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
    }

    /** Sets the GCP member allowed to read this (cloud) tenant's archive bucket. */
    private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        var data = toSlime(request.getData()).get();
        var member = mandatory("member", data).asString();
        if (member.isBlank()) {
            // NOTE(review): message says "role" but this validates the GCP member — consider rewording.
            return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
        }
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
    }

    /** Clears the GCP archive access member for this (cloud) tenant. */
    private HttpResponse removeGcpArchiveAccess(String tenantName) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
    }

    /** Patches mutable application-level fields (majorVersion, pemDeployKey) and reports what changed. */
    private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            Inspector majorVersionField = requestObject.field("majorVersion");
            if (majorVersionField.valid()) {
                // A majorVersion of 0 means "unpin" (stored as null).
                Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
                application = application.withMajorVersion(majorVersion);
                messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
            }
            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }
            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }

    /** Returns the application with the given tenant and application name, or throws NotExistsException. */
    private Application getApplication(String tenantName, String applicationName) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        return controller.applications().getApplication(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Returns the instance with the given id, or throws NotExistsException. */
    private Instance getInstance(String tenantName, String applicationName, String instanceName) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        return controller.applications().getInstance(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Lists the nodes allocated to the given deployment, with state, version and topology details. */
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
            node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
            toSlime(node.resources(), nodeObject);
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
            nodeObject.setBool("down", node.down());
            // A node is reported retired when it either is retired or wants to retire.
            nodeObject.setBool("retired", node.retired() || node.wantToRetire());
            nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
            nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
            nodeObject.setString("group", node.group());
            nodeObject.setLong("index", node.index());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Serializes autoscaling state (min/max/current/target/suggested resources and scaling events) per cluster. */
    private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

        Slime slime = new Slime();
        Cursor clustersObject = slime.setObject().setObject("clusters");
        for (Cluster cluster : application.clusters().values()) {
            Cursor clusterObject = clustersObject.setObject(cluster.id().value());
            clusterObject.setString("type", cluster.type().name());
            toSlime(cluster.min(), clusterObject.setObject("min"));
            toSlime(cluster.max(), clusterObject.setObject("max"));
            if ( ! cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            toSlime(cluster.target(), clusterObject.setObject("target"));
            toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Maps a node state to its wire name; throws on states not meant to be exposed. */
    private static String valueOf(Node.State state) {
        return switch (state) {
            case failed: yield "failed";
            case parked: yield "parked";
            case dirty: yield "dirty";
            case ready: yield "ready";
            case active: yield "active";
            case inactive: yield "inactive";
            case reserved: yield "reserved";
            case provisioned: yield "provisioned";
            case breakfixed: yield "breakfixed";
            case deprovisioned: yield "deprovisioned";
            default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        };
    }

    /** Maps an orchestration service state to its wire name; unknown falls through to "unknown". */
    static String valueOf(Node.ServiceState state) {
        switch (state) {
            case expectedUp: return "expectedUp";
            case allowedDown: return "allowedDown";
            case permanentlyDown: return "permanentlyDown";
            case unorchestrated: return "unorchestrated";
            case unknown: break;
        }
        return "unknown";
    }

    /** Maps a cluster type to its wire name; throws on unknown. */
    private static String valueOf(Node.ClusterType type) {
        return switch (type) {
            case admin: yield "admin";
            case content: yield "content";
            case container: yield "container";
            case combined: yield "combined";
            case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
        };
    }

    /** Maps a disk speed to its wire name. */
    private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
        return switch (diskSpeed) {
            case fast : yield "fast";
            case slow : yield "slow";
            case any : yield "any";
        };
    }

    /** Maps a storage type to its wire name. */
    private static String valueOf(NodeResources.StorageType storageType) {
        return switch (storageType) {
            case remote : yield "remote";
            case local : yield "local";
            case any : yield "any";
        };
    }

    /** Streams log entries for the given deployment, filtered by the given query parameters. */
    private HttpResponse logs(String tenantName, String
                               applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                // try-with-resources guarantees the upstream log stream is closed after the copy.
                try (logStream) {
                    logStream.transferTo(outputStream);
                }
            }

            @Override
            public long maxPendingBytes() {
                return 1 << 26; // 64 MiB
            }
        };
    }

    /** Returns the current support access state for the given deployment. */
    private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
    }

    /** Grants support access to the given deployment for 7 days, attributed to the calling user. */
    private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Principal principal = requireUserPrincipal(request);
        Instant now = controller.clock().instant();
        SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
    }

    /** Revokes support access for the given deployment, then re-triggers it so the change takes effect. */
    private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Principal principal = requireUserPrincipal(request);
        SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
        controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
    }

    /** Returns raw search node (proton) metrics for the given deployment. */
    private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment);
        return buildResponseFromSearchNodeMetrics(searchNodeMetrics);
    }

    /** Lists scaling events per cluster for the deployment, within the [from, until] epoch-second window. */
    private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        var from = Optional.ofNullable(request.getProperty("from"))
                           .map(Long::valueOf)
                           .map(Instant::ofEpochSecond)
                           .orElse(Instant.EPOCH);
        var until = Optional.ofNullable(request.getProperty("until"))
                            .map(Long::valueOf)
                            .map(Instant::ofEpochSecond)
                            .orElse(Instant.now(controller.clock()));
        var application = ApplicationId.from(tenantName, applicationName, instanceName);
        var zone = requireZone(environment, region);
        var deployment = new DeploymentId(application, zone);
        var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
        var slime = new Slime();
        var root = slime.setObject();
        for (var entry : events.entrySet()) {
            var serviceRoot = root.setArray(entry.getKey().clusterId().value());
            scalingEventsToSlime(entry.getValue(), serviceRoot);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Wraps a list of search node metrics as {"metrics": [...]} JSON; 500 on serialization failure. */
    private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
        try {
            var jsonObject = jsonMapper.createObjectNode();
            var jsonArray = jsonMapper.createArrayNode();
            for (SearchNodeMetrics metrics : searchnodeMetrics) {
                jsonArray.add(metrics.toJson());
            }
            jsonObject.set("metrics", jsonArray);
            return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
        } catch (JsonProcessingException e) {
            log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
            return new JsonResponse(500, "");
        }
    }

    /** Force- or re-triggers the given job, optionally skipping tests, revision and/or platform upgrade. */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        boolean requireTests = ! requestObject.field("skipTests").asBool();
        boolean reTrigger = requestObject.field("reTrigger").asBool();
        boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
        boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
        String triggered = reTrigger
                ? controller.applications().deploymentTrigger()
                            .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                : controller.applications().deploymentTrigger()
                            .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                            .stream().map(job -> job.type().jobName()).collect(joining(", "));

        // Builds e.g. ", without revision and platform upgrade" for the response message.
        String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                    (upgradeRevision ? "" : "revision") +
                                    ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                    (upgradePlatform ? "" : "platform") +
                                    ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
        return new MessageResponse(triggered.isEmpty() ?
"Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); } private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) { var mail = mandatory("mail", inspector).asString(); var type = mandatory("mailType", inspector).asString(); var mailType = switch (type) { case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT; case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS; default -> throw new IllegalArgumentException("Unknown mail type " + type); }; var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType); return pendingVerification.isPresent() ? 
new MessageResponse("Re-sent verification mail to " + mail) : ErrorResponse.notFoundError("No pending mail verification found for " + mail); } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? 
                                 application.productionInstances().values() :
                                 application.instances().values())
            toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    /** Serializes one instance of an application: changes, change blockers, rotations and deployments. */
    private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
        object.setString("instance", instance.name().value());

        if (deploymentSpec.instance(instance.name()).isPresent()) {
            // NOTE(review): 'jobStatus' is never read below — appears to be dead code; confirm and remove.
            Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), status.application());
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());

            Cursor changeBlockers = object.setArray("changeBlockers");
            deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }

        addRotationId(object, instance);

        // Deployments are listed in deployment-spec order when the spec declares this instance.
        List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                     .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                     .orElse(List.copyOf(instance.deployments().values()));

        Cursor deploymentsArray = object.setArray("deployments");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = deploymentsArray.addObject();
            if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);

            if (recurseOverDeployments(request)) // Include full deployment information when recursive.
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                addAvailabilityZone(deploymentObject, deployment.zone());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/instance/" + instance.name().value() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value(),
                                                           request.getUri()).toString());
            }
        }
    }

    /** Adds the first assigned rotation id, if any, as "rotationId". */
    private void addRotationId(Cursor object, Instance instance) {
        instance.rotations().stream()
                .map(AssignedRotation::rotationId)
                .findFirst()
                .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
    }

    /** Serializes an instance together with application-level context (metrics, activity, owner). */
    private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
        Application application = status.application();
        object.setString("tenant", instance.id().tenant().value());
        object.setString("application", instance.id().application().value());
        object.setString("instance", instance.id().instance().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + instance.id().tenant().value() +
                                                 "/application/" + instance.id().application().value() +
                                                 "/instance/" + instance.id().instance().value() + "/job/",
                                                 request.getUri()).toString());

        application.revisions().last().ifPresent(version -> {
            version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
            version.commit().ifPresent(commit -> object.setString("commit", commit));
        });

        application.projectId().ifPresent(id -> object.setLong("projectId", id));

        if (application.deploymentSpec().instance(instance.name()).isPresent()) {
            // NOTE(review): 'jobStatus' is never read below — appears to be dead code; confirm and remove.
            Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

            if ( !
                 instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

            Cursor changeBlockers = object.setArray("changeBlockers");
            application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }

        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

        addRotationId(object, instance);

        // Deployments are listed in deployment-spec order when the spec declares this instance.
        List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                                  .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                  .orElse(List.copyOf(instance.deployments().values()));

        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            if (deployment.zone().environment() == Environment.prod) {
                if (instance.rotations().size() == 1) {
                    toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject);
                }
                if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                    toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
                }
            }
            if (recurseOverDeployments(request)) // Include full deployment information when recursive.
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                deploymentObject.setString("instance", instance.id().instance().value());
                addAvailabilityZone(deploymentObject, deployment.zone());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value(),
                                                           request.getUri()).toString());
            }
        }

        // Also list zones targeted by production jobs or active manual deployments, but not yet deployed to.
        Stream.concat(status.jobSteps().keySet().stream()
                            .filter(job -> job.application().instance().equals(instance.name()))
                            .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                      controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
              .map(job -> job.type().zone())
              .filter(zone -> ! instance.deployments().containsKey(zone))
              .forEach(zone -> {
                  Cursor deploymentObject = instancesArray.addObject();
                  deploymentObject.setString("environment", zone.environment().value());
                  deploymentObject.setString("region", zone.region().value());
              });

        application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    /** Returns details about a single deployment of an instance; throws when not deployed in the given zone. */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().getInstance(id)
                                      .orElseThrow(() -> new NotExistsException(id + " not found"));

        DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
        Deployment deployment = instance.deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Serializes a change: its platform version and/or application revision. */
    private void toSlime(Cursor object, Change change, Application application) {
        change.platform().ifPresent(version -> object.setString("version", version.toString()));
        change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision)));
    }

    /** Serializes a single endpoint: cluster, tls, url, scope, routing method and legacy flag. */
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
        object.setBool("legacy", endpoint.legacy());
    }

    /** Serializes full deployment details: endpoints, versions, status, quota, archive URI, activity and metrics. */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        addAvailabilityZone(response, deployment.zone());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

        // Legacy and non-direct endpoints are hidden unless explicitly requested.
        boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
        var endpointArray = response.setArray("endpoints");
        EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone);
        if (!legacyEndpoints) {
            zoneEndpoints = zoneEndpoints.not().legacy().direct();
        }
        for (var endpoint : zoneEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }
        EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                                   .targets(deploymentId);
        if (!legacyEndpoints) {
            declaredEndpoints = declaredEndpoints.not().legacy().direct();
        }
        for (var endpoint : declaredEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }

        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", application.revisions().get(deployment.revision()).stringId());
        response.setLong("build", deployment.revision().number());

        Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
        response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));

        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

        controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
            Cursor enclave = response.setObject("enclave");
            enclave.setString("cloudAccount", cloudAccount.value());
            controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> enclave.setString("athensDomain", domain.value()));
        });

        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

            if (!deployment.zone().environment().isManuallyDeployed()) {
                // Derive "complete"/"pending"/"running" from the job step status for this zone's deployment job.
                DeploymentStatus status = controller.jobController().deploymentStatus(application);
                JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
                Optional.ofNullable(status.jobSteps().get(jobId))
                        .ifPresent(stepStatus -> {
                            JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                            if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                                response.setString("status", "complete");
                            else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant()))
                                response.setString("status", "pending");
                            else
                                response.setString("status", "running");
                        });
            } else {
                // Manually deployed zones report status from the last deployment run instead.
                var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
                deploymentRun.ifPresent(run -> {
                    response.setString("status", run.hasEnded() ? "complete" : "running");
                });
            }
        }

        response.setDouble("quota", deployment.quota().rate());
        deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));

        // Enclave deployments archive per cloud account; all others archive per tenant.
        (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ?
                controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) :
                controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
                .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }

    /** Serializes global rotation state as { "bcpStatus": { "rotationStatus": ... } }. */
    private void toSlime(RotationState state, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        bcpStatus.setString("rotationStatus", rotationStateString(state));
    }

    /** Serializes per-endpoint rotation status for a deployment under "endpointStatus". */
    private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
        var array = object.setArray("endpointStatus");
        for (var rotation : rotations) {
            var statusObject = array.addObject();
            var targets = status.of(rotation.rotationId());
            statusObject.setString("endpointId", rotation.endpointId().id());
            statusObject.setString("rotationId", rotation.rotationId().asString());
statusObject.setString("clusterId", rotation.clusterId().value());
            statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
            statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
        }
    }

    /** Returns the monitoring-system (dashboard) URI for the given deployment. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }

    /** Takes the given deployment in or out of global rotation, recording who (operator/tenant) made the change. */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        ZoneId zone = requireZone(environment, region);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
        RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
        RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
        controller.routing().of(deploymentId).setRoutingStatus(status, agent);
        return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ? "in" : "out of"));
    }

    /** Maps the zone's cloud to the private-service type name used in the API ("unknown" for other clouds). */
    private String serviceTypeIn(DeploymentId id) {
        CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName();
        if (CloudName.AWS.equals(cloud)) return "aws-private-link";
        if (CloudName.GCP.equals(cloud)) return "gcp-service-connect";
        return "unknown";
    }

    /** Lists private (VPC endpoint) service info per load balancer cluster of the given deployment. */
    private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));
        List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId());
        Slime slime = new Slime();
        Cursor lbArray = slime.setObject().setArray("privateServices");
        for (LoadBalancer lb : lbs) {
            Cursor serviceObject = lbArray.addObject();
            serviceObject.setString("cluster", lb.cluster().value());
            lb.service().ifPresent(service -> {
                serviceObject.setString("serviceId", service.id());
                serviceObject.setString("type", serviceTypeIn(id));
                Cursor urnsArray = serviceObject.setArray("allowedUrns");
                for (AllowedUrn urn : service.allowedUrns()) {
                    Cursor urnObject = urnsArray.addObject();
                    urnObject.setString("type", switch (urn.type()) {
                        case awsPrivateLink -> "aws-private-link";
                        case gcpServiceConnect -> "gcp-service-connect";
                    });
                    urnObject.setString("urn", urn.urn());
                }
                // Current endpoint connections for this cluster, with their state.
                Cursor endpointsArray = serviceObject.setArray("endpoints");
                controller.serviceRegistry().vpcEndpointService()
                          .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount())
                          .forEach(endpoint -> {
                              Cursor endpointObject = endpointsArray.addObject();
                              endpointObject.setString("endpointId", endpoint.endpointId());
                              endpointObject.setString("state", endpoint.stateValue().name());
                              endpointObject.setString("detail", endpoint.stateString());
                          });
            });
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the current global-rotation override status for the deployment's primary rotation endpoint, if any. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Slime slime = new Slime();
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                .requiresRotation()
                .primary();
        if (primaryEndpoint.isPresent()) {
            DeploymentRoutingContext context = controller.routing().of(deploymentId);
            RoutingStatus status = context.routingStatus();
            array.addString(primaryEndpoint.get().upstreamName(deploymentId));
            Cursor statusObject = array.addObject();
            statusObject.setString("status", status.value().name());
            statusObject.setString("reason", "");
            statusObject.setString("agent", status.agent().name());
            statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the rotation status (BCP status) of the given, or only, endpoint of a deployment. */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().requireInstance(applicationId);
        ZoneId zone = requireZone(environment, region);
        RotationId rotation = findRotationId(instance, endpointId);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(instance.rotationStatus().of(rotation, deployment), response);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the currently deploying change (platform and/or application) for the instance, with pin flags. */
    private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        Slime slime =
new Slime();
        Cursor root = slime.setObject();
        if ( ! instance.change().isEmpty()) {
            instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
            instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
            // "pinned" is kept for backwards compatibility; it mirrors "platform-pinned".
            root.setBool("pinned", instance.change().isPlatformPinned());
            root.setBool("platform-pinned", instance.change().isPlatformPinned());
            root.setBool("application-pinned", instance.change().isRevisionPinned());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns whether orchestration of the given deployment is currently suspended. */
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                    requireZone(environment, region));
        boolean suspended = controller.applications().isSuspended(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setBool("suspended", suspended);
        return new SlimeJsonResponse(slime);
    }

    /** Proxies a service /status page from a node of the given deployment. */
    private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                    requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters()));
    }

    /** Returns the orchestrator's view of the service nodes of the given deployment. */
    private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                    requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
    }

    /** Proxies a service /state/v1 page from a node, passing the original URL along as "forwarded-url". */
    private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                    requireZone(environment, region));
        Query query = Query.empty().add(request.getJDiscRequest().parameters());
        query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
        return controller.serviceRegistry().configServer().getServiceNodePage(
                deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
    }

    /** Returns application package content at the given path for a deployment. */
    private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                    requireZone(environment, region));
        return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
    }

    /** Updates an existing tenant from the request body, then returns its current state. */
    private HttpResponse updateTenant(String tenantName, HttpRequest request) {
        getTenantOrThrow(tenantName); // 404 if the tenant does not exist
        TenantName tenant = TenantName.from(tenantName);
        Inspector requestObject = toSlime(request.getData()).get();
        controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                    accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
        return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
    }

    /** Creates a tenant; in public systems, also seeds tenant contact info from the requesting user. */
    private HttpResponse createTenant(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Inspector requestObject = toSlime(request.getData()).get();
        controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                    accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
        if (controller.system().isPublic()) {
            User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
            TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                    .info()
                    .withContact(TenantContact.from(user.name(), new Email(user.email(), true)));
            controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
                lockedTenant = lockedTenant.withInfo(info);
                controller.tenants().store(lockedTenant);
            });
        }
        return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
    }

    /** Creates an application under the tenant and returns its serialized form. */
    private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
        Application application = controller.applications().createApplication(id, credentials); // note: return value unused
        Slime slime = new Slime();
        toSlime(id, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Creates an instance, first creating the application itself if it does not yet exist. */
    private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        if (controller.applications().getApplication(applicationId).isEmpty())
            createApplication(tenantName, applicationName, request);
        controller.applications().createInstance(applicationId.instance(instanceName));
        Slime slime = new Slime();
        toSlime(applicationId.instance(instanceName), slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9".
*/
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // Empty version means "deploy the current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            // Only operators may force a version which is not active in this system.
            if ( ! versionStatus.isActive(version) && ! isOperator(request))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPlatformPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Trigger deployment to the last known application package for the given application. */
    private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Inspector buildField = toSlime(request.getData()).get().field("build");
        long build = buildField.valid() ? buildField.asLong() : -1; // -1 means "latest known revision"
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                              : getRevision(application.get(), build);
            Change change = Change.of(revision);
            if (pin)
                change = change.withRevisionPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Resolves a build number to a known revision whose package is still stored, or throws. */
    private RevisionId getRevision(Application application, long build) {
        return application.revisions().withPackage().stream()
                          .map(ApplicationVersion::id)
                          .filter(version -> version.number() == build)
                          .findFirst()
                          .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                                  application.id().application(),
                                                                                                  build))
                          .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
    }

    /** Marks the given build as skipped, and cancels any instance change currently targeting it. */
    private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
        controller.applications().lockApplicationOrThrow(id, application -> {
            controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
            for (Instance instance : application.get().instances().values())
                if (instance.change().revision().equals(Optional.of(revision)))
                    controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        });
        return new MessageResponse("Marked build '" + build + "' as non-deployable");
    }

    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */

    /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents.
*/
    private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        // "clusterId" and "documentType" are optional comma-separated lists; blanks are ignored.
        List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                            .flatMap(clusters -> Stream.of(clusters.split(",")))
                                            .filter(cluster -> ! cluster.isBlank())
                                            .toList();
        List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                             .flatMap(types -> Stream.of(types.split(",")))
                                             .filter(type -> ! type.isBlank())
                                             .toList();
        Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
        boolean indexedOnly = request.getBooleanProperty("indexedOnly");
        controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed,
                                          "reindexing triggered by " + requireUserPrincipal(request).getName());
        return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                                   (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                                   (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                                   (indexedOnly ? ", for indexed types" : "") +
                                   (speed != null ? ", with speed " + speed : ""));
    }

    /** Gets reindexing status of an application in a zone.
*/
    private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setBool("enabled", reindexing.enabled());
        // Per cluster: pending reindexing (type -> required generation) and ready reindexing status per type.
        Cursor clustersArray = root.setArray("clusters");
        reindexing.clusters().entrySet().stream().sorted(comparingByKey())
                  .forEach(cluster -> {
                      Cursor clusterObject = clustersArray.addObject();
                      clusterObject.setString("name", cluster.getKey());
                      Cursor pendingArray = clusterObject.setArray("pending");
                      cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                             .forEach(pending -> {
                                 Cursor pendingObject = pendingArray.addObject();
                                 pendingObject.setString("type", pending.getKey());
                                 pendingObject.setLong("requiredGeneration", pending.getValue());
                             });
                      Cursor readyArray = clusterObject.setArray("ready");
                      cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                             .forEach(ready -> {
                                 Cursor readyObject = readyArray.addObject();
                                 readyObject.setString("type", ready.getKey());
                                 setStatus(readyObject, ready.getValue());
                             });
                  });
        return new SlimeJsonResponse(slime);
    }

    /** Serializes one reindexing status entry; all fields are optional. */
    void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
        status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
        status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
        status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
        status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
        status.message().ifPresent(message -> statusObject.setString("message", message));
        status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
        status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
        status.cause().ifPresent(cause -> statusObject.setString("cause", cause));
    }

    /** Maps a reindexing state to its lower-case API string. */
    private static String toString(ApplicationReindexing.State state) {
        return switch (state) {
            case PENDING: yield "pending";
            case RUNNING: yield "running";
            case FAILED: yield "failed";
            case SUCCESSFUL: yield "successful";
        };
    }

    /** Enables reindexing of an application in a zone. */
    private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().enableReindexing(id, zone);
        return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
    }

    /** Disables reindexing of an application in a zone. */
    private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().disableReindexing(id, zone);
        return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                    requireZone(environment, region));
        // All filter properties are optional; absent values mean "restart everything".
        RestartFilter restartFilter = new RestartFilter()
                .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
                .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
                .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        controller.applications().restart(deploymentId, restartFilter);
        return new MessageResponse("Requested restart of " + deploymentId);
    }

    /** Set suspension status of the given deployment. */
    private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                    requireZone(environment, region));
        controller.applications().setSuspension(deploymentId, suspend);
        return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
    }

    /** Deploys an application package directly to a (manually deployed) zone through the job controller. */
    private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
        if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
            throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

        Map<String, byte[]> dataParts = parseDataParts(request);
        if ( !
dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP)) // use the same constant as the lookup below, so check and retrieval cannot drift
            throw new IllegalArgumentException("Missing required form part 'applicationZip'");

        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
        controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                         Optional.of(id.instance()),
                                                                         Optional.of(type.zone()),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));

        // Optional "deployOptions" JSON part may carry an explicit Vespa version and a dry-run flag.
        Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                            .map(json -> SlimeUtils.jsonToSlime(json).get())
                                            .flatMap(options -> optional("vespaVersion", options))
                                            .map(Version::fromString);

        ensureApplicationExists(TenantAndApplicationId.from(id), request);

        boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions"))
                                 .map(json -> SlimeUtils.jsonToSlime(json).get())
                                 .flatMap(options -> optional("dryRun", options))
                                 .map(Boolean::valueOf)
                                 .orElse(false);

        controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
        RunId runId = controller.jobController().last(id, type).get().id();
        Slime slime = new Slime();
        Cursor rootObject = slime.setObject();
        rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time.");
        rootObject.setLong("run", runId.number());
        return new SlimeJsonResponse(slime);
    }

    /** Deploys a system application package to a zone, pinned to the current system version. */
    private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);

        Map<String, byte[]> dataParts = parseDataParts(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

        // Only system applications with a package can be deployed through this API.
        Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
        if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) {
            return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
        }

        // The version is always the current system version; callers may not choose one.
        String vespaVersion = deployOptions.field("vespaVersion").asString();
        if ( ! vespaVersion.isEmpty()) {
            return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
        }
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        DeploymentResult result = controller.applications()
                                            .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());

        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber());
        Cursor logArray = root.setArray("prepareMessages");
        for (LogEntry logMessage : result.log()) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.epochMillis());
            logObject.setString("level", logMessage.level().getName());
            logObject.setString("message", logMessage.message());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Deletes a tenant; only operators may "forget" (hard-delete) one. */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        boolean forget = request.getBooleanProperty("forget");
        if (forget && ! isOperator(request))
            return ErrorResponse.forbidden("Only operators can forget a tenant");

        controller.tenants().delete(TenantName.from(tenantName),
                                    Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest())),
                                    forget);
        return new MessageResponse("Deleted tenant " + tenantName);
    }

    /** Deletes an application and all its instances. */
    private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
        return new MessageResponse("Deleted application " + id);
    }

    /** Deletes an instance; the application itself is deleted too when its last instance goes. */
    private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        controller.applications().deleteInstance(id.instance(instanceName));
        if (controller.applications().requireApplication(id).instances().isEmpty()) {
            Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(),
                                                                        request.getJDiscRequest());
            controller.applications().deleteApplication(id, credentials);
        }
        return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
    }

    /** Deactivates a deployment and aborts any still-running deployment job for it. */
    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                           requireZone(environment, region));
        // Attempt to deactivate application even if the deployment is not known by the controller
        controller.applications().deactivate(id.applicationId(), id.zoneId());
        controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
                  .filter(run -> !
run.hasEnded())
           .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
        return new MessageResponse("Deactivated " + id);
    }

    /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
    private HttpResponse testConfig(ApplicationId id, JobType type) {
        Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
        // Fall back to the default instance when the requested instance is not declared in the deployment spec.
        ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                ? id : TenantAndApplicationId.from(id).defaultInstance();
        HashSet<DeploymentId> deployments = controller.applications()
                .getInstance(prodInstanceId).stream()
                .flatMap(instance -> instance.productionDeployments().keySet().stream())
                .map(zone -> new DeploymentId(prodInstanceId, zone))
                .collect(Collectors.toCollection(HashSet::new));
        ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
        // Non-production jobs additionally test against the job's own zone.
        if ( ! type.isProduction())
            deployments.add(new DeploymentId(toTest, type.zone()));
        Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
        if (deployment == null)
            throw new NotExistsException(toTest + " is not deployed in " + type.zone());
        return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false,
                                                                      deployment.version(), deployment.revision(), deployment.at(),
                                                                      controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                      controller.applications().reachableContentClustersByZone(deployments)));
    }

    /** Requests a service dump ("serviceDump" report) on the given node, optionally waiting for the result. */
    private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                            String region, String hostname, HttpRequest request) {
        NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
        ZoneId zone = requireZone(environment, region);
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
        if (report != null) {
            Cursor cursor = report.get();
            // Refuse a new dump while an existing one is still in flight (neither failed nor completed), unless ?force=true.
            boolean force = request.getBooleanProperty("force");
            if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
                throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
            }
        }
        Slime requestPayload;
        try {
            requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
        } catch (Exception e) {
            throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
        }
        Cursor requestPayloadCursor = requestPayload.get();
        String configId = requestPayloadCursor.field("configId").asString();
        long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
        if (configId.isEmpty()) {
            throw new IllegalArgumentException("Missing configId");
        }
        Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
        int artifactEntries = artifactsCursor.entries();
        if (artifactEntries == 0) {
            throw new IllegalArgumentException("Missing or empty 'artifacts'");
        }
        // Build the dump request document that node-repository will act on.
        Slime dumpRequest = new Slime();
        Cursor dumpRequestCursor = dumpRequest.setObject();
        dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
        dumpRequestCursor.setString("configId", configId);
        Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
        for (int i = 0; i < artifactEntries; i++) {
            dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
        }
        if (expiresAt > 0) {
            dumpRequestCursor.setLong("expiresAt", expiresAt);
        }
        Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
        if (dumpOptionsCursor.children() > 0) {
            SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
        }
        // NOTE(review): new String(byte[]) uses the platform default charset pre-JDK 18 — confirm UTF-8 is guaranteed here.
        var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
        nodeRepository.updateReports(zone, hostname, reportsUpdate);
        boolean wait = request.getBooleanProperty("wait");
        if (!wait) return new MessageResponse("Request created");
        return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
    }

    /** Returns the current service dump report for the given node, or 404 if none exists. */
    private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
        NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
        ZoneId zone = requireZone(environment, region);
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
                .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
        return new SlimeJsonResponse(report);
    }

    /** Polls the node's service dump report until it has either completed or failed, then returns it. */
    // NOTE(review): no explicit deadline here — this loop is bounded only by the handler's overall request timeout; confirm.
    private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                                  String application, String instance, String hostname) {
        int pollInterval = 2;
        Slime report;
        while (true) {
            report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
            Cursor cursor = report.get();
            if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
                break;
            }
            final Slime copyForLambda = report; // effectively-final copy for the logging lambda below
            log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
            log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
            controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
        }
        return new SlimeJsonResponse(report);
    }

    /**
     * Returns the "serviceDump" report of the given node, after verifying that the node exists
     * and is owned by the given application. Empty if the node has no such report.
     */
    private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                      String application, String instance, String hostname) {
        Node node;
        try {
            node = nodeRepository.getNode(zone, hostname);
        } catch (IllegalArgumentException e) {
            throw new NotExistsException(hostname);
        }
        ApplicationId app = ApplicationId.from(tenant, application, instance);
        ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
        if (!app.equals(owner)) {
            throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
        }
        String json = node.reports().get("serviceDump");
        if (json == null) return Optional.empty();
        return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
    }

    /** Parses a source revision from JSON; all of "repository", "branch" and "commit" are required. */
    private static SourceRevision toSourceRevision(Inspector object) {
        if (!object.field("repository").valid() ||
            !object.field("branch").valid() ||
            !object.field("commit").valid()) {
            throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
        }
        return new SourceRevision(object.field("repository").asString(),
                                  object.field("branch").asString(),
                                  object.field("commit").asString());
    }

    private Tenant getTenantOrThrow(String tenantName) {
        return controller.tenants().get(tenantName)
                .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
    }

    /** Writes the given tenant, with its applications, to the given object. */
    private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
        object.setString("tenant", tenant.name().value());
        object.setString("type", tenantType(tenant));
        switch (tenant.type()) {
            case athenz:
                AthenzTenant athenzTenant = (AthenzTenant) tenant;
                object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) {
        return Joiner.on("/").join(elements);
    }

    /** Writes tenant and application names, plus the application's API URL, to the given object. */
    private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
        object.setString("tenant", id.tenant().value());
        object.setString("application", id.application().value());
        object.setString("url", withPath("/application/v4" +
                                         "/tenant/" + id.tenant().value() +
                                         "/application/" + id.application().value(),
                                         request.getUri()).toString());
    }

    /** Writes tenant, application and instance names, plus the instance's API URL, to the given object. */
    private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
        object.setString("tenant", id.tenant().value());
        object.setString("application", id.application().value());
        object.setString("instance", id.instance().value());
        object.setString("url", withPath("/application/v4" +
                                         "/tenant/" + id.tenant().value() +
                                         "/application/" + id.application().value() +
                                         "/instance/" + id.instance().value(),
                                         request.getUri()).toString());
    }

    private void stringsToSlime(List<String> strings, Cursor array) {
        for (String string : strings)
            array.addString(string);
    }

    private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
        Cursor secretStore = object.setArray("secretStores");
        tenantSecretStores.forEach(store -> {
            toSlime(secretStore.addObject(), store);
        });
    }

    private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
        object.setString("tenantRole", tenantRoles.containerRole());
        var stores = object.setArray("accounts");
        tenantSecretStores.forEach(secretStore -> {
            toSlime(stores.addObject(), secretStore);
        });
    }

    private void toSlime(Cursor object, TenantSecretStore secretStore) {
        object.setString("name", secretStore.getName());
        object.setString("awsId", secretStore.getAwsId());
        object.setString("role", secretStore.getRole());
    }

    /** Reads the entire stream as one string (delimiter \A = whole input), or null if the stream is empty. */
    // NOTE(review): the Scanner is never closed and uses the platform default charset — confirm whether
    // callers rely on the stream staying open; otherwise consider try-with-resources and an explicit charset.
    private String readToString(InputStream stream) {
        Scanner scanner = new Scanner(stream).useDelimiter("\\A");
        if ( ! scanner.hasNext()) return null;
        return scanner.next();
    }

    private static boolean recurseOverTenants(HttpRequest request) {
        return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
    }

    private static boolean recurseOverApplications(HttpRequest request) {
        return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
    }

    private static boolean recurseOverDeployments(HttpRequest request) {
        return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
    }

    private static boolean showOnlyProductionInstances(HttpRequest request) {
        return "true".equals(request.getProperty("production"));
    }

    private static boolean showOnlyActiveInstances(HttpRequest request) {
        return "true".equals(request.getProperty("activeInstances"));
    }

    private static boolean includeDeleted(HttpRequest request) {
        return "true".equals(request.getProperty("includeDeleted"));
    }

    private static String tenantType(Tenant tenant) {
        return switch (tenant.type()) {
            case athenz: yield "ATHENS";
            case cloud: yield "CLOUD";
            case deleted: yield "DELETED";
        };
    }

    private static ApplicationId appIdFromPath(Path path) {
        return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
    }

    private JobType jobTypeFromPath(Path path) {
        return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
    }

    private RunId runIdFromPath(Path path) {
        long number = Long.parseLong(path.get("number"));
        return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
    }

    /** Handles submission of a new application revision: application and test packages plus source metadata. */
    private HttpResponse submit(String tenant, String application, HttpRequest request) {
        Map<String, byte[]> dataParts = parseDataParts(request);
        Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
        long projectId = submitOptions.field("projectId").asLong();
        projectId = projectId == 0 ? 1 : projectId; // 0 means the field was absent; default to 1
        Optional<String> repository = optional("repository", submitOptions);
        Optional<String> branch = optional("branch", submitOptions);
        Optional<String> commit = optional("commit", submitOptions);
        // A source revision is only recorded when all of repository, branch and commit are given.
        Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                : Optional.empty();
        Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
        Optional<String> authorEmail = optional("authorEmail", submitOptions);
        Optional<String> description = optional("description", submitOptions);
        int risk = (int) submitOptions.field("risk").asLong();
        sourceUrl.map(URI::create).ifPresent(url -> {
            if (url.getHost() == null || url.getScheme() == null)
                throw new IllegalArgumentException("Source URL must include scheme and host");
        });
        ApplicationPackage applicationPackage =
                new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
        byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]);
        Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
        TenantName tenantName = TenantName.from(tenant);
        controller.applications().verifyPlan(tenantName);
        controller.applications().verifyApplicationIdentityConfiguration(tenantName,
                                                                         Optional.empty(),
                                                                         Optional.empty(),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));
        TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
        ensureApplicationExists(id, request);
        return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
    }

    /** Submits an empty deployment-removal package, removing all production deployments. */
    private HttpResponse removeAllProdDeployments(String tenant, String application) {
        JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                     TenantAndApplicationId.from(tenant, application),
                                                     new Submission(ApplicationPackage.deploymentRemoval(), new byte[0],
                                                                    Optional.empty(), Optional.empty(), Optional.empty(),
                                                                    Optional.empty(), 0),
                                                     0);
        return new MessageResponse("All deployments removed");
    }

    /** Adds the cloud-native availability zone to the object — for AWS zones only. */
    private void addAvailabilityZone(Cursor object, ZoneId zoneId) {
        ZoneApi zone = controller.zoneRegistry().get(zoneId);
        if (!zone.getCloudName().equals(CloudName.AWS)) return;
        object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone());
    }

    private ZoneId requireZone(String environment, String region) {
        return requireZone(ZoneId.from(environment, region));
    }

    /** Returns the given zone if it exists in this system; the synthetic prod "controller" zone is always accepted. */
    private ZoneId requireZone(ZoneId zone) {
        // Some APIs refer to a synthetic controller "zone" without a real region in the registry.
        if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
            return zone;
        }
        if (!controller.zoneRegistry().hasZone(zone)) {
            throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
        }
        return zone;
    }

    /** Parses the multipart request body, verifying the X-Content-Hash header against the payload when present. */
    private static Map<String, byte[]> parseDataParts(HttpRequest request) {
        String contentHash = request.getHeader("X-Content-Hash");
        if (contentHash == null)
            return new MultipartParser().parse(request);

        // Digest the body while parsing, then compare against the base64-encoded header value.
        // NOTE(review): Arrays.equals is not constant-time; if this hash ever guards authenticity
        // rather than just transfer integrity, consider MessageDigest.isEqual — confirm the threat model.
        DigestInputStream digester = Signatures.sha256Digester(request.getData());
        var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
        if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
            throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
        return dataParts;
    }

    /**
     * Resolves the rotation for the given instance: by endpoint id when given, otherwise the sole
     * rotation — failing when none exist, or when several exist and no endpoint id was given.
     */
    private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
        if (instance.rotations().isEmpty()) {
            throw new NotExistsException("global rotation does not exist for " + instance);
        }
        if (endpointId.isPresent()) {
            return instance.rotations().stream()
                    .filter(r -> r.endpointId().id().equals(endpointId.get()))
                    .map(AssignedRotation::rotationId)
                    .findFirst()
                    .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                              " does not exist for " + instance));
        } else if (instance.rotations().size() > 1) {
            throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
        }
        return instance.rotations().get(0).rotationId();
    }

    private static String rotationStateString(RotationState state) {
        return switch (state) {
            case in: yield "IN";
            case out: yield "OUT";
            case unknown: yield "UNKNOWN";
        };
    }

    private static String endpointScopeString(Endpoint.Scope scope) {
        return switch (scope) {
            case weighted: yield "weighted";
            case application: yield "application";
            case global: yield "global";
            case zone: yield "zone";
        };
    }

    private static String routingMethodString(RoutingMethod method) {
        return switch (method) {
            case exclusive: yield "exclusive";
            case sharedLayer4: yield "sharedLayer4";
        };
    }

    /** Returns the request-context attribute of the given name and type, failing with 400 when absent. */
    private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
        return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
                .filter(cls::isInstance)
                .map(cls::cast)
                .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
    }

    /** Returns whether given request is by an operator */
    private static boolean isOperator(HttpRequest request) {
        var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
        return securityContext.roles().stream()
                .map(Role::definition)
                .anyMatch(definition -> definition == RoleDefinition.hostedOperator);
    }

    /** Creates the application if it does not exist — allowed only in public systems or with an Okta context. */
    private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
        if (controller.applications().getApplication(id).isEmpty()) {
            if (controller.system().isPublic() || hasOktaContext(request)) {
                log.fine("Application does not exist in public, creating: " + id);
                var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
                controller.applications().createApplication(id, credentials);
            } else {
                log.fine("Application does not exist in hosted, failing: " + id);
                throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
            }
        }
    }

    // Uses the exception as a presence check, since the credentials parser offers no "has" query.
    private boolean hasOktaContext(HttpRequest request) {
        try {
            OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
            return true;
        } catch (IllegalArgumentException e) {
            return false;
        }
    }

    /** Returns the given deployments sorted in the order their zones are declared in the deployment spec. */
    private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
        List<ZoneId> productionZones = spec.zones().stream()
                .filter(z -> z.region().isPresent())
                .map(z -> ZoneId.from(z.environment(), z.region().get()))
                .toList();
        // Deployments whose zone is not in the spec sort first (indexOf yields -1).
        return deployments.stream()
                .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

}
/**
 * REST handler for the application/v4 API: tenant, application, instance, deployment and
 * job resources. Dispatches first on HTTP method, then on request path; the first matching
 * route wins.
 */
class ApplicationApiHandler extends AuditLoggingRequestHandler {

    private static final ObjectMapper jsonMapper = new ObjectMapper();

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    // Generous timeout: deployment and submission requests may take many minutes.
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    // Entry point: routes by HTTP method and maps known exception types to error responses.
    @Override
    public HttpResponse auditAndHandle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri());
            return switch (request.getMethod()) {
                case GET: yield handleGET(path, request);
                case PUT: yield handlePUT(path, request);
                case POST: yield handlePOST(path, request);
                case PATCH: yield handlePATCH(path, request);
                case DELETE: yield handleDELETE(path, request);
                case OPTIONS: yield handleOPTIONS();
                default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            };
        } catch (RestApiException.Forbidden e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        } catch (RestApiException.Unauthorized e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        } catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        } catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        } catch (ConfigServerException e) {
            // Translate config server error codes to matching HTTP statuses; anything
            // unrecognized becomes a 400 carrying the original code name.
            return switch (e.code()) {
                case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
                default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
            };
        } catch (RuntimeException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    // GET route table; continues for several chunks below and ends in a 404 fallback.
    private HttpResponse handleGET(Path path, HttpRequest request) {
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"),
request.getProperty("allowMajor"));
        // GET routes (continued): deployment overview, application packages, and job/run data.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
        // Per-deployment routes, .../instance/{instance}/environment/{environment}/region/{region}/... ordering.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy path ordering (.../environment/.../region/.../instance/...); /metrics and
        // /metrics/searchnode route to the same handler.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return
searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        // Legacy path ordering (.../environment/.../region/.../instance/...). NOTE: a second,
        // byte-identical check of this same deployment path followed here; it was unreachable
        // (the first match returns) and has been removed.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"),
path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        // No GET route matched.
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // PUT route table: tenant updates, access management, archive access, secret stores,
    // and global-rotation overrides (set, i.e. deactivate = false).
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // POST route table: creation, deployment-change ("deploying") operations, submissions,
    // and job triggering. Note: /deploying/pin and /deploying/platform-pin are aliases —
    // both pin the platform version (pinned = true).
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        // Per-instance deployment-change operations; /deploying/pin and /deploying/platform-pin
        // are aliases (both pin the platform version).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        // Per-deployment POST operations; the bare deployment path and .../deploy both deploy.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        // Legacy path ordering (.../environment/.../region/.../instance/...).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // PATCH route table. NOTE(review): the .../instance/{instance} route passes only tenant and
    // application to patchApplication — the instance segment is accepted but not forwarded;
    // confirm this is intentional (patching applies application-wide).
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // DELETE route table: tenant/application/instance removal, key and store removal,
    // deployment-change cancellation, and override/suspension clearing.
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return
removeManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        // Cancelling "deploying" changes: "all", or a specific choice, for the default instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        // Per-deployment DELETE operations; DELETE on an override/suspension clears it.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy path ordering (.../environment/.../region/.../instance/...).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // Advertises the supported methods for CORS/preflight-style OPTIONS requests.
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    // Root listing with recursion: every tenant with its applications.
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications = controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request);
        return new SlimeJsonResponse(slime);
    }

    // Root resource: recursive listing if requested, otherwise just the resource index.
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request) ?
recursiveRoot(request) : new ResourceResponse(request, "tenant");
    }

    // Lists all tenants (optionally including deleted ones) in compact form.
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    // Looks up a tenant by name, 404 if absent.
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                .map(tenant -> tenant(tenant, request))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    // Serializes a tenant together with its applications.
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    // Returns managed-access state, any pending access request, and the access audit log
    // for a cloud tenant. A ZMS 404 is treated as "no managed access configured".
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var slime = new Slime();
        var cursor = slime.setObject();
        try {
            var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
            var managedAccess = accessControlService.getManagedAccess(tenant);
            cursor.setBool("managedAccess", managedAccess);
            accessRoleInformation.getPendingRequest()
                    .ifPresent(membershipRequest -> {
                        var requestCursor = cursor.setObject("pendingRequest");
                        requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                        requestCursor.setString("reason", membershipRequest.getReason());
                    });
            var auditLogCursor = cursor.setArray("auditLog");
            accessRoleInformation.getAuditLog()
                    .forEach(auditLogEntry -> {
                        var entryCursor = auditLogCursor.addObject();
                        entryCursor.setString("created", auditLogEntry.getCreationTime());
                        entryCursor.setString("approver", auditLogEntry.getApprover());
                        entryCursor.setString("reason", auditLogEntry.getReason());
                        entryCursor.setString("status", auditLogEntry.getAction());
                    });
        } catch (ZmsClientException e) {
            if (e.getErrorCode() == 404)
                cursor.setBool("managedAccess", false);
        }
        return new SlimeJsonResponse(slime);
    }

    // Operator-only: files an ssh access request for a cloud tenant.
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    // Approves or rejects a pending ssh access request. Expiry defaults to one day from now
    // when the request body carries no "expiry" epoch-millis field.
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        var expiry = inspector.field("expiry").valid() ?
                Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS);
        var approve = inspector.field("approve").asBool();
        controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
        return new MessageResponse("OK");
    }

    private HttpResponse addManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, true);
    }

    private HttpResponse removeManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, false);
    }

    // Enables/disables managed access for a cloud tenant. A ZMS 404 means the backing
    // configuration is not ready yet and is surfaced as a 409.
    // NOTE(review): "privel" in the error message below looks like a typo for "privileges";
    // left unchanged here since it is a runtime string.
    private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only set access privel for cloud tenants");
        try {
            controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
            var slime = new Slime();
            slime.setObject().setBool("managedAccess", managedAccess);
            return new SlimeJsonResponse(slime);
        } catch (ZmsClientException e) {
            if (e.getErrorCode() == 404)
                return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes");
            throw e;
        }
    }

    // Serializes tenant info; only cloud tenants carry info.
    private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    // Applies the given handler to the named tenant when it is a cloud tenant, else 404.
    private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
        return controller.tenants().get(TenantName.from(tenantName))
                .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                .map(tenant -> handler.apply((CloudTenant) tenant))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield 
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true)) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); return oldContacts.ofType(TenantContacts.EmailContact.class) .stream() .filter(contact -> contact.email().getEmailAddress().equals(email)) .findAny() .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email())) .orElseGet(() -> { controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS); return new TenantContacts.EmailContact(audiences, new Email(email, false)); }); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, 
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var 
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
        try {
            // Wrap the config server's raw JSON response under a "result" field, tagged with the target deployment.
            var responseRoot = new Slime();
            var responseCursor = responseRoot.setObject();
            responseCursor.setString("target", deploymentId.toString());
            var responseResultCursor = responseCursor.setObject("result");
            var responseSlime = SlimeUtils.jsonToSlime(response);
            SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
            return new SlimeJsonResponse(responseRoot);
        } catch (JsonParseException e) {
            // Config server returned something that is not valid JSON — log and surface as an error response.
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    // Removes the posted developer key (PEM, body field "key") from a cloud tenant and
    // returns the remaining keys. Only valid for cloud tenants.
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        // NOTE(review): 'user' is never read below, and Map.get returns null for unknown keys, so this
        // lookup neither validates nor logs anything — looks like dead code; confirm and remove.
        Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    // Serializes a key -> principal map as [{key: <PEM>, user: <principal name>}, ...].
    private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    // Adds the posted deploy key (PEM, body field "key") to the application and returns
    // the resulting key set as PEM strings.
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                    .map(KeyUtils::toPem)
                    .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    // Removes the posted deploy key from the application and returns the remaining key set as PEM strings.
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                    .map(KeyUtils::toPem)
                    .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    // Registers a new AWS-role-based tenant secret store for a cloud tenant.
    // Body fields "awsId", "externalId" and "role" are all mandatory.
    private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        var data = toSlime(request.getData()).get();
        var awsId = mandatory("awsId", data).asString();
        var externalId = mandatory("externalId", data).asString();
        var role = mandatory("role",
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); } private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); } private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + 
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); } private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); } private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? 
"empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); } private Application getApplication(String tenantName, String applicationName) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); return controller.applications().getApplication(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private Instance getInstance(String tenantName, String applicationName, String instanceName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); return controller.applications().getInstance(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id)); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString()); 
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor)); toSlime(node.resources(), nodeObject); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); nodeObject.setBool("down", node.down()); nodeObject.setBool("retired", node.retired() || node.wantToRetire()); nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration()); nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration()); nodeObject.setString("group", node.group()); nodeObject.setLong("index", node.index()); } return new SlimeJsonResponse(slime); } private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id); Slime slime = new Slime(); Cursor clustersObject = slime.setObject().setObject("clusters"); for (Cluster cluster : application.clusters().values()) { Cursor clusterObject = clustersObject.setObject(cluster.id().value()); clusterObject.setString("type", cluster.type().name()); toSlime(cluster.min(), clusterObject.setObject("min")); toSlime(cluster.max(), clusterObject.setObject("max")); if ( ! 
cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize")); toSlime(cluster.current(), clusterObject.setObject("current")); toSlime(cluster.target(), clusterObject.setObject("target")); toSlime(cluster.suggested(), clusterObject.setObject("suggested")); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents")); clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis()); } return new SlimeJsonResponse(slime); } private static String valueOf(Node.State state) { return switch (state) { case failed: yield "failed"; case parked: yield "parked"; case dirty: yield "dirty"; case ready: yield "ready"; case active: yield "active"; case inactive: yield "inactive"; case reserved: yield "reserved"; case provisioned: yield "provisioned"; case breakfixed: yield "breakfixed"; case deprovisioned: yield "deprovisioned"; default: throw new IllegalArgumentException("Unexpected node state '" + state + "'."); }; } static String valueOf(Node.ServiceState state) { switch (state) { case expectedUp: return "expectedUp"; case allowedDown: return "allowedDown"; case permanentlyDown: return "permanentlyDown"; case unorchestrated: return "unorchestrated"; case unknown: break; } return "unknown"; } private static String valueOf(Node.ClusterType type) { return switch (type) { case admin: yield "admin"; case content: yield "content"; case container: yield "container"; case combined: yield "combined"; case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); }; } private static String valueOf(NodeResources.DiskSpeed diskSpeed) { return switch (diskSpeed) { case fast : yield "fast"; case slow : yield "slow"; case any : yield "any"; }; } private static String valueOf(NodeResources.StorageType storageType) { return switch (storageType) { case remote : yield "remote"; case local : yield "local"; case any : yield "any"; }; } private HttpResponse logs(String tenantName, String 
// NOTE(review): continuation of logs(...) from the previous lines — streams the config
// server's log stream straight through to the HTTP response (closing it via
// try-with-resources) and allows up to 64 MiB (1 << 26) of pending response bytes.
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { try (logStream) { logStream.transferTo(outputStream); } } @Override public long maxPendingBytes() { return 1 << 26; } }; }
// Returns the current support-access state for the deployment.
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant())); }
// Grants support access to the deployment for 7 days, attributed to the calling user.
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); Instant now = controller.clock().instant(); SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now)); }
// Revokes support access and re-triggers (or queues) the deployment job so the change
// takes effect, attributed to the calling user.
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName()); controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant())); }
// Fetches per-search-node (Proton) metrics from the config server for the deployment.
private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment); return buildResponseFromSearchNodeMetrics(searchNodeMetrics); }
// Returns scaling events for the deployment within an optional [from, until] window
// given as epoch seconds ("from" defaults to EPOCH, "until" to now), grouped per cluster.
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { var from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.EPOCH); var until = Optional.ofNullable(request.getProperty("until")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.now(controller.clock())); var application = ApplicationId.from(tenantName, applicationName, instanceName); var zone = requireZone(environment, region); var deployment = new DeploymentId(application, zone); var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment); var slime = new Slime(); var root = slime.setObject(); for (var entry : events.entrySet()) { var serviceRoot = root.setArray(entry.getKey().clusterId().value()); scalingEventsToSlime(entry.getValue(), serviceRoot); } return new SlimeJsonResponse(slime); }
// Wraps a list of search-node metrics in {"metrics": [...]} via Jackson; serialization
// failures are logged and surfaced as an empty 500 response rather than propagated.
private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) { try { var jsonObject = jsonMapper.createObjectNode(); var jsonArray = jsonMapper.createArrayNode(); for (SearchNodeMetrics metrics : searchnodeMetrics) { jsonArray.add(metrics.toJson()); } jsonObject.set("metrics", jsonArray); return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject)); } catch (JsonProcessingException e) { log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e); return new JsonResponse(500, ""); } }
// Manually triggers (or, with "reTrigger", re-triggers) a job. Flags "skipTests",
// "skipRevision" and "skipUpgrade" invert into requireTests/upgradeRevision/upgradePlatform.
// The response message lists the triggered jobs and any suppressed upgrade kinds.
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); boolean upgradeRevision = ! requestObject.field("skipRevision").asBool(); boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool(); String triggered = reTrigger ? controller.applications().deploymentTrigger() .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform) .stream().map(job -> job.type().jobName()).collect(joining(", ")); String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") + (upgradeRevision ? "" : "revision") + ( ! upgradeRevision && ! upgradePlatform ? " and " : "") + (upgradePlatform ? "" : "platform") + ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "") ; return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); }
// Pauses the given job for the maximum allowed pause duration.
private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); }
// Resumes a previously paused job.
private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); }
// Re-sends a pending verification mail of type "contact" or "notifications" to the address.
// NOTE(review): method continues past this block boundary; remainder is unchanged below.
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) { var mail = mandatory("mail", inspector).asString(); var type = mandatory("mailType", inspector).asString(); var mailType = switch (type) { case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT; case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS; default -> throw new IllegalArgumentException("Unknown mail type " + type); }; var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType); return pendingVerification.isPresent() ?
new MessageResponse("Re-sent verification mail to " + mail) : ErrorResponse.notFoundError("No pending mail verification found for " + mail); } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( ! 
status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && ! 
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } } private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); } private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! 
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && ! 
instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> ! 
instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region)); 
// NOTE(review): continuation of deployment(...) from the previous lines — resolves the
// deployment for the zone and serializes it.
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); }
// Serializes a Change: optional platform "version" and optional "revision" details.
private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); }
// Serializes one endpoint: cluster, tls, url, scope, routing method and legacy flag.
private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); }
// Serializes full deployment detail: ids, availability zone, zone and declared endpoints
// (legacy ones only when "includeLegacyEndpoints" is set), cluster/node/monitoring links,
// versions and build, deploy time and optional expiry, enclave cloud account info, rotation
// status, a derived job "status" (complete/pending/running), quota/cost, the archive URI
// (account-scoped for enclave deployments, tenant-scoped otherwise), activity and metrics.
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); addAvailabilityZone(response, deployment.zone()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints = zoneEndpoints.not().legacy().direct(); } for (var endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> { Cursor enclave = response.setObject("enclave"); enclave.setString("cloudAccount", cloudAccount.value()); controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> enclave.setString("athensDomain", domain.value())); }); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant())) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ? controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) : controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString())); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); }
// Serializes a rotation state as {"bcpStatus": {"rotationStatus": <state>}}.
private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); }
// Serializes per-rotation endpoint status for a deployment into "endpointStatus".
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } }
// Resolves the monitoring-system URI for a deployment via the zone registry.
private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); }
// Sets a deployment's global routing status in/out of service. Operators and tenants are
// recorded as distinct agents.
// NOTE(review): this method continues beyond the end of this chunk; the remainder is not
// visible here and is left untouched.
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } DeploymentId deploymentId = new DeploymentId(instance.id(), zone); RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; controller.routing().of(deploymentId).setRoutingStatus(status, agent); return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ?
"in" : "out of")); } private String serviceTypeIn(DeploymentId id) { CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName(); if (CloudName.AWS.equals(cloud)) return "aws-private-link"; if (CloudName.GCP.equals(cloud)) return "gcp-service-connect"; return "unknown"; } private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId()); Slime slime = new Slime(); Cursor lbArray = slime.setObject().setArray("privateServices"); for (LoadBalancer lb : lbs) { Cursor serviceObject = lbArray.addObject(); serviceObject.setString("cluster", lb.cluster().value()); lb.service().ifPresent(service -> { serviceObject.setString("serviceId", service.id()); serviceObject.setString("type", serviceTypeIn(id)); Cursor urnsArray = serviceObject.setArray("allowedUrns"); for (AllowedUrn urn : service.allowedUrns()) { Cursor urnObject = urnsArray.addObject(); urnObject.setString("type", switch (urn.type()) { case awsPrivateLink -> "aws-private-link"; case gcpServiceConnect -> "gcp-service-connect"; }); urnObject.setString("urn", urn.urn()); } Cursor endpointsArray = serviceObject.setArray("endpoints"); controller.serviceRegistry().vpcEndpointService() .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount()) .forEach(endpoint -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("endpointId", endpoint.endpointId()); endpointObject.setString("state", endpoint.stateValue().name()); endpointObject.setString("detail", endpoint.stateString()); }); }); } return new SlimeJsonResponse(slime); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String 
instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); Slime slime = 
new Slime(); Cursor root = slime.setObject(); if ( ! instance.change().isEmpty()) { instance.change().platform().ifPresent(version -> root.setString("platform", version.toString())); instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString())); root.setBool("pinned", instance.change().isPlatformPinned()); root.setBool("platform-pinned", instance.change().isPlatformPinned()); root.setBool("application-pinned", instance.change().isRevisionPinned()); } return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); } private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private 
HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if 
(controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), new Email(user.email(), true))); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". 
*/
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // An empty version in the request means "deploy the current system version".
            if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus);
            // Only operators may force a version which is not active in this system.
            if ( ! versionStatus.isActive(version) && ! isOperator(request))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin) change = change.withPlatformPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Trigger deployment to the last known application package for the given application. */
    private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Inspector buildField = toSlime(request.getData()).get().field("build");
        long build = buildField.valid() ? buildField.asLong() : -1; // -1: no specific build requested; use the latest
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                              : getRevision(application.get(), build);
            Change change = Change.of(revision);
            if (pin) change = change.withRevisionPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Returns the revision with the given build number, provided its package is still present in the application store. */
    private RevisionId getRevision(Application application, long build) {
        return application.revisions().withPackage().stream()
                          .map(ApplicationVersion::id)
                          .filter(version -> version.number() == build)
                          .findFirst()
                          .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build))
                          .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
    }

    /** Marks the given build as skipped (non-deployable), and cancels any instance change targeting it. */
    private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
        controller.applications().lockApplicationOrThrow(id, application -> {
            controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
            // Any instance currently rolling out the now-skipped revision has its change cancelled.
            for (Instance instance : application.get().instances().values())
                if (instance.change().revision().equals(Optional.of(revision)))
                    controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        });
        return new MessageResponse("Marked build '" + build + "' as non-deployable");
    }

    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */

    /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents.
*/
    private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        // Optional comma-separated request filters; blank entries are ignored, empty list means "all".
        List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                            .flatMap(clusters -> Stream.of(clusters.split(",")))
                                            .filter(cluster -> ! cluster.isBlank())
                                            .toList();
        List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                             .flatMap(types -> Stream.of(types.split(",")))
                                             .filter(type -> ! type.isBlank())
                                             .toList();
        Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; // null: use the default reindexing speed
        boolean indexedOnly = request.getBooleanProperty("indexedOnly");
        controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName());
        return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                                   (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                                   (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                                   (indexedOnly ? ", for indexed types" : "") +
                                   (speed != null ? ", with speed " + speed : ""));
    }

    /** Gets reindexing status of an application in a zone.
*/
    private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setBool("enabled", reindexing.enabled());
        // Clusters, and the pending/ready types within each, are emitted in sorted key order for stable output.
        Cursor clustersArray = root.setArray("clusters");
        reindexing.clusters().entrySet().stream().sorted(comparingByKey())
                  .forEach(cluster -> {
                      Cursor clusterObject = clustersArray.addObject();
                      clusterObject.setString("name", cluster.getKey());
                      Cursor pendingArray = clusterObject.setArray("pending");
                      cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                             .forEach(pending -> {
                                 Cursor pendingObject = pendingArray.addObject();
                                 pendingObject.setString("type", pending.getKey());
                                 pendingObject.setLong("requiredGeneration", pending.getValue());
                             });
                      Cursor readyArray = clusterObject.setArray("ready");
                      cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                             .forEach(ready -> {
                                 Cursor readyObject = readyArray.addObject();
                                 readyObject.setString("type", ready.getKey());
                                 setStatus(readyObject, ready.getValue());
                             });
                  });
        return new SlimeJsonResponse(slime);
    }

    // Fills in the timestamp/state fields of a reindexing status object; absent values are simply omitted.
    void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
        status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
        status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
        status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
        status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
        status.message().ifPresent(message -> statusObject.setString("message", message));
        status.progress().ifPresent(progress
-> statusObject.setDouble("progress", progress));
        status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
        status.cause().ifPresent(cause -> statusObject.setString("cause", cause));
    }

    /** Maps a reindexing state to its wire-format string. */
    private static String toString(ApplicationReindexing.State state) {
        return switch (state) {
            case PENDING: yield "pending";
            case RUNNING: yield "running";
            case FAILED: yield "failed";
            case SUCCESSFUL: yield "successful";
        };
    }

    /** Enables reindexing of an application in a zone. */
    private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().enableReindexing(id, zone);
        return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
    }

    /** Disables reindexing of an application in a zone. */
    private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().disableReindexing(id, zone);
        return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        // All filter properties are optional; an unconstrained filter restarts every node of the deployment.
        RestartFilter restartFilter = new RestartFilter()
                .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
                .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
                .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        controller.applications().restart(deploymentId, restartFilter);
        return new MessageResponse("Requested restart of " + deploymentId);
    }

    /** Set suspension status of the given deployment. */
    private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        controller.applications().setSuspension(deploymentId, suspend);
        return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
    }

    // Deploys an application package directly through the job controller; restricted to manual environments
    // for non-operators.
    private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
        if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
            throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
        Map<String, byte[]> dataParts = parseDataParts(request);
        if ( !
dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request)); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if ( ! vespaVersion.isEmpty()) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } DeploymentResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber()); Cursor logArray = root.setArray("prepareMessages"); for (LogEntry logMessage : result.log()) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.epochMillis()); logObject.setString("level", logMessage.level().getName()); logObject.setString("message", logMessage.message()); } return new SlimeJsonResponse(slime); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && ! 
isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! 
run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! 
type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime 
dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final 
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
Assuming not including `withoutPadding` is intentional?
public int hashCode() { return Arrays.hashCode(content); }
}
public int hashCode() { return Arrays.hashCode(content); }
class RawBase64 implements Comparable<RawBase64> { private final byte[] content; private final boolean withoutPadding; public RawBase64(byte[] content) { this(content, false); } public RawBase64(byte[] content, boolean withoutPadding) { Objects.requireNonNull(content); this.content = content; this.withoutPadding = withoutPadding; } public byte [] value() { return content; } @Override public int compareTo(RawBase64 rhs) { return Arrays.compareUnsigned(content, rhs.content); } @Override public String toString() { return withoutPadding ? Base64.getEncoder().withoutPadding().encodeToString(content) : Base64.getEncoder().encodeToString(content); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RawBase64 rawBase64 = (RawBase64) o; return Arrays.equals(content, rawBase64.content); } @Override }
class RawBase64 implements Comparable<RawBase64> { private final byte[] content; private final boolean withoutPadding; public RawBase64(byte[] content) { this(content, false); } public RawBase64(byte[] content, boolean withoutPadding) { Objects.requireNonNull(content); this.content = content; this.withoutPadding = withoutPadding; } public byte [] value() { return content; } @Override public int compareTo(RawBase64 rhs) { return Arrays.compareUnsigned(content, rhs.content); } @Override public String toString() { return withoutPadding ? Base64.getEncoder().withoutPadding().encodeToString(content) : Base64.getEncoder().encodeToString(content); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RawBase64 rawBase64 = (RawBase64) o; return Arrays.equals(content, rawBase64.content); } @Override }
You've also changed the default from 1 to 1024 here. That's probably OK? But we need to update some system test if we want this change.
private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) { int wantedDocumentCount = getProperty(request, WANTED_DOCUMENT_COUNT, integerParser) .orElse(streamed ? Integer.MAX_VALUE : 1 << 10); if (wantedDocumentCount <= 0) throw new IllegalArgumentException("wantedDocumentCount must be positive"); Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser); concurrency.ifPresent(value -> { if (value <= 0) throw new IllegalArgumentException("concurrency must be positive"); }); Optional<String> cluster = getProperty(request, CLUSTER); if (cluster.isEmpty() && path.documentType().isEmpty()) throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level"); VisitorParameters parameters = parseCommonParameters(request, path, cluster); parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(DocumentOnly.NAME))); parameters.setMaxTotalHits(wantedDocumentCount); parameters.visitInconsistentBuckets(true); long timeoutMs = Math.max(1, request.getTimeout(MILLISECONDS) - handlerTimeout.toMillis()); if (streamed) { StaticThrottlePolicy throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1); concurrency.ifPresent(throttlePolicy::setMaxPendingCount); parameters.setThrottlePolicy(throttlePolicy); parameters.setTimeoutMs(timeoutMs); } else { parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)))); parameters.setSessionTimeoutMs(timeoutMs); } return parameters; }
.orElse(streamed ? Integer.MAX_VALUE : 1 << 10);
private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) { int wantedDocumentCount = getProperty(request, WANTED_DOCUMENT_COUNT, integerParser) .orElse(streamed ? Integer.MAX_VALUE : 1 << 10); if (wantedDocumentCount <= 0) throw new IllegalArgumentException("wantedDocumentCount must be positive"); Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser); concurrency.ifPresent(value -> { if (value <= 0) throw new IllegalArgumentException("concurrency must be positive"); }); Optional<String> cluster = getProperty(request, CLUSTER); if (cluster.isEmpty() && path.documentType().isEmpty()) throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level"); VisitorParameters parameters = parseCommonParameters(request, path, cluster); parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(DocumentOnly.NAME))); parameters.setMaxTotalHits(wantedDocumentCount); parameters.visitInconsistentBuckets(true); long timeoutMs = Math.max(1, request.getTimeout(MILLISECONDS) - handlerTimeout.toMillis()); if (streamed) { StaticThrottlePolicy throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1); concurrency.ifPresent(throttlePolicy::setMaxPendingCount); parameters.setThrottlePolicy(throttlePolicy); parameters.setTimeoutMs(timeoutMs); } else { parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)))); parameters.setSessionTimeoutMs(timeoutMs); } return parameters; }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
It may be mentioned that the default of 1 in effect had the semantics of "return one bucket's worth of data per request", since you always return all docs for a visited bucket even if it exceeds the wanted document count. Since non-streaming mode has a single pending visitor by default, you'd terminate the visiting after receiving the one bucket.
private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) { int wantedDocumentCount = getProperty(request, WANTED_DOCUMENT_COUNT, integerParser) .orElse(streamed ? Integer.MAX_VALUE : 1 << 10); if (wantedDocumentCount <= 0) throw new IllegalArgumentException("wantedDocumentCount must be positive"); Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser); concurrency.ifPresent(value -> { if (value <= 0) throw new IllegalArgumentException("concurrency must be positive"); }); Optional<String> cluster = getProperty(request, CLUSTER); if (cluster.isEmpty() && path.documentType().isEmpty()) throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level"); VisitorParameters parameters = parseCommonParameters(request, path, cluster); parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(DocumentOnly.NAME))); parameters.setMaxTotalHits(wantedDocumentCount); parameters.visitInconsistentBuckets(true); long timeoutMs = Math.max(1, request.getTimeout(MILLISECONDS) - handlerTimeout.toMillis()); if (streamed) { StaticThrottlePolicy throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1); concurrency.ifPresent(throttlePolicy::setMaxPendingCount); parameters.setThrottlePolicy(throttlePolicy); parameters.setTimeoutMs(timeoutMs); } else { parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)))); parameters.setSessionTimeoutMs(timeoutMs); } return parameters; }
.orElse(streamed ? Integer.MAX_VALUE : 1 << 10);
private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) { int wantedDocumentCount = getProperty(request, WANTED_DOCUMENT_COUNT, integerParser) .orElse(streamed ? Integer.MAX_VALUE : 1 << 10); if (wantedDocumentCount <= 0) throw new IllegalArgumentException("wantedDocumentCount must be positive"); Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser); concurrency.ifPresent(value -> { if (value <= 0) throw new IllegalArgumentException("concurrency must be positive"); }); Optional<String> cluster = getProperty(request, CLUSTER); if (cluster.isEmpty() && path.documentType().isEmpty()) throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level"); VisitorParameters parameters = parseCommonParameters(request, path, cluster); parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(DocumentOnly.NAME))); parameters.setMaxTotalHits(wantedDocumentCount); parameters.visitInconsistentBuckets(true); long timeoutMs = Math.max(1, request.getTimeout(MILLISECONDS) - handlerTimeout.toMillis()); if (streamed) { StaticThrottlePolicy throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1); concurrency.ifPresent(throttlePolicy::setMaxPendingCount); parameters.setThrottlePolicy(throttlePolicy); parameters.setTimeoutMs(timeoutMs); } else { parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)))); parameters.setSessionTimeoutMs(timeoutMs); } return parameters; }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
I noticed that too, will reconsider.
private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) { int wantedDocumentCount = getProperty(request, WANTED_DOCUMENT_COUNT, integerParser) .orElse(streamed ? Integer.MAX_VALUE : 1 << 10); if (wantedDocumentCount <= 0) throw new IllegalArgumentException("wantedDocumentCount must be positive"); Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser); concurrency.ifPresent(value -> { if (value <= 0) throw new IllegalArgumentException("concurrency must be positive"); }); Optional<String> cluster = getProperty(request, CLUSTER); if (cluster.isEmpty() && path.documentType().isEmpty()) throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level"); VisitorParameters parameters = parseCommonParameters(request, path, cluster); parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(DocumentOnly.NAME))); parameters.setMaxTotalHits(wantedDocumentCount); parameters.visitInconsistentBuckets(true); long timeoutMs = Math.max(1, request.getTimeout(MILLISECONDS) - handlerTimeout.toMillis()); if (streamed) { StaticThrottlePolicy throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1); concurrency.ifPresent(throttlePolicy::setMaxPendingCount); parameters.setThrottlePolicy(throttlePolicy); parameters.setTimeoutMs(timeoutMs); } else { parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)))); parameters.setSessionTimeoutMs(timeoutMs); } return parameters; }
.orElse(streamed ? Integer.MAX_VALUE : 1 << 10);
private VisitorParameters parseGetParameters(HttpRequest request, DocumentPath path, boolean streamed) { int wantedDocumentCount = getProperty(request, WANTED_DOCUMENT_COUNT, integerParser) .orElse(streamed ? Integer.MAX_VALUE : 1 << 10); if (wantedDocumentCount <= 0) throw new IllegalArgumentException("wantedDocumentCount must be positive"); Optional<Integer> concurrency = getProperty(request, CONCURRENCY, integerParser); concurrency.ifPresent(value -> { if (value <= 0) throw new IllegalArgumentException("concurrency must be positive"); }); Optional<String> cluster = getProperty(request, CLUSTER); if (cluster.isEmpty() && path.documentType().isEmpty()) throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level"); VisitorParameters parameters = parseCommonParameters(request, path, cluster); parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(DocumentOnly.NAME))); parameters.setMaxTotalHits(wantedDocumentCount); parameters.visitInconsistentBuckets(true); long timeoutMs = Math.max(1, request.getTimeout(MILLISECONDS) - handlerTimeout.toMillis()); if (streamed) { StaticThrottlePolicy throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1); concurrency.ifPresent(throttlePolicy::setMaxPendingCount); parameters.setThrottlePolicy(throttlePolicy); parameters.setTimeoutMs(timeoutMs); } else { parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(Math.min(100, concurrency.orElse(1)))); parameters.setSessionTimeoutMs(timeoutMs); } return parameters; }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
I clean the group which will be merged, so it will be removed at clean empty group
private void mergeGroup(Group srcGroup, Group dstGroup) { if (srcGroup == rootGroup) { rootGroup = dstGroup; } List<GroupExpression> needReinsertedExpressions = Lists.newArrayList(); for (Iterator<Map.Entry<GroupExpression, GroupExpression>> iterator = groupExpressions.entrySet().iterator(); iterator.hasNext(); ) { GroupExpression groupExpr = iterator.next().getKey(); int referSrcGroupIndex = -1; for (int i = 0; i < groupExpr.getInputs().size(); i++) { if (groupExpr.getInputs().get(i) == srcGroup) { referSrcGroupIndex = i; break; } } if (referSrcGroupIndex >= 0) { iterator.remove(); groupExpr.getInputs().set(referSrcGroupIndex, dstGroup); needReinsertedExpressions.add(groupExpr); } if (groupExpr.getGroup() == srcGroup) { groupExpr.setGroup(dstGroup); } } for (GroupExpression groupExpression : needReinsertedExpressions) { if (!groupExpressions.containsKey(groupExpression)) { groupExpressions.put(groupExpression, groupExpression); } else { groupExpression.getGroup().removeGroupExpression(groupExpression); groupExpression.setUnused(true); GroupExpression existGroupExpression = groupExpressions.get(groupExpression); if (!needMerge(groupExpression.getGroup(), existGroupExpression.getGroup())) { groupExpression.getGroup().replaceBestExpression(groupExpression, existGroupExpression); } } } dstGroup.mergeGroup(srcGroup); List<Group> groups = getAllEmptyGroups(); for (Group group : groups) { removeOneGroup(group); } }
private void mergeGroup(Group srcGroup, Group dstGroup) { groups.remove(srcGroup); if (srcGroup == rootGroup) { rootGroup = dstGroup; } List<GroupExpression> needReinsertedExpressions = Lists.newArrayList(); for (Iterator<Map.Entry<GroupExpression, GroupExpression>> iterator = groupExpressions.entrySet().iterator(); iterator.hasNext(); ) { GroupExpression groupExpr = iterator.next().getKey(); int referSrcGroupIndex = -1; for (int i = 0; i < groupExpr.getInputs().size(); i++) { if (groupExpr.getInputs().get(i) == srcGroup) { referSrcGroupIndex = i; break; } } if (referSrcGroupIndex >= 0) { iterator.remove(); groupExpr.getInputs().set(referSrcGroupIndex, dstGroup); needReinsertedExpressions.add(groupExpr); } if (groupExpr.getGroup() == srcGroup) { groupExpr.setGroup(dstGroup); } } for (GroupExpression groupExpression : needReinsertedExpressions) { if (!groupExpressions.containsKey(groupExpression)) { groupExpressions.put(groupExpression, groupExpression); } else { groupExpression.getGroup().removeGroupExpression(groupExpression); groupExpression.setUnused(true); GroupExpression existGroupExpression = groupExpressions.get(groupExpression); if (!needMerge(groupExpression.getGroup(), existGroupExpression.getGroup())) { groupExpression.getGroup().replaceBestExpression(groupExpression, existGroupExpression); } } } dstGroup.mergeGroup(srcGroup); List<Group> groups = getAllEmptyGroups(); for (Group group : groups) { removeOneGroup(group); } }
class Memo { private static final Logger LOG = LogManager.getLogger(Memo.class); private int nextGroupId = 0; private final List<Group> groups; private Group rootGroup; /** * The map value is root group id for the GroupExpression. * We need to store group id because when {@see insertGroupExpression} * we need to get existed group id for tmp GroupExpression, * which doesn't have group id info */ private final Map<GroupExpression, GroupExpression> groupExpressions; public List<Group> getGroups() { return groups; } public Map<GroupExpression, GroupExpression> getGroupExpressions() { return groupExpressions; } public Memo() { groups = Lists.newLinkedList(); groupExpressions = Maps.newHashMap(); } public Group getRootGroup() { return rootGroup; } /** * Copy an expression into search space, this function will add an GroupExpression for * this Expression. If this Expression has children, this function will be called * recursively to create GroupExpression and Group for every single Expression * For example, Join(Scan(A), Scan(B)) will create 3 Groups and GroupExpressions for Join, * Scan(A) and Scan(B). 
* We return GroupExpression rather than Group because we can get Group from GroupExpression */ public GroupExpression init(OptExpression originExpression) { Preconditions.checkState(groups.size() == 0); Preconditions.checkState(groupExpressions.size() == 0); GroupExpression rootGroupExpression = copyIn(null, originExpression).second; rootGroup = rootGroupExpression.getGroup(); return rootGroupExpression; } public Pair<Boolean, GroupExpression> insertGroupExpression(GroupExpression groupExpression, Group targetGroup) { if (groupExpressions.get(groupExpression) != null) { GroupExpression existedGroupExpression = groupExpressions.get(groupExpression); Group existedGroup = existedGroupExpression.getGroup(); if (needMerge(targetGroup, existedGroup)) { mergeGroup(existedGroup, targetGroup); } return new Pair<>(false, existedGroupExpression); } if (targetGroup == null) { targetGroup = newGroup(); groups.add(targetGroup); } groupExpressions.put(groupExpression, groupExpression); targetGroup.addExpression(groupExpression); return new Pair<>(true, groupExpression); } /** * Insert an enforce expression into the target group. 
*/ public void insertEnforceExpression(GroupExpression groupExpression, Group targetGroup) { groupExpression.setGroup(targetGroup); } private Group newGroup() { return new Group(nextGroupId++); } public Pair<Boolean, GroupExpression> copyIn(Group targetGroup, OptExpression expression) { List<Group> inputs = Lists.newArrayList(); for (OptExpression input : expression.getInputs()) { Group group; if (input.getGroupExpression() != null) { group = input.getGroupExpression().getGroup(); } else { group = copyIn(null, input).second.getGroup(); } Preconditions.checkState(group != null); Preconditions.checkState(group != targetGroup); inputs.add(group); } GroupExpression groupExpression = new GroupExpression(expression.getOp(), inputs); Pair<Boolean, GroupExpression> result = insertGroupExpression(groupExpression, targetGroup); if (result.first && targetGroup == null) { Preconditions.checkState(result.second.getOp().isLogical()); result.second.deriveLogicalPropertyItself(); result.second.getGroup().setStatistics(expression.getStatistics()); } return result; } private boolean needMerge(Group targetGroup, Group existedGroup) { return targetGroup != null && targetGroup != existedGroup; } private List<Group> getAllEmptyGroups() { List<Group> groups = Lists.newArrayList(); for (Group group : getGroups()) { if (group.isEmpty()) { groups.add(group); continue; } for (Group childGroup : group.getFirstLogicalExpression().getInputs()) { if (childGroup.isEmpty()) { groups.add(childGroup); break; } } } return groups; } public void removeAllEmptyGroup() { List<Group> groups = getAllEmptyGroups(); while (!groups.isEmpty()) { for (Group group : groups) { removeOneGroup(group); } groups = getAllEmptyGroups(); } } private void removeOneGroup(Group group) { groups.remove(group); for (Iterator<Map.Entry<GroupExpression, GroupExpression>> iterator = groupExpressions.entrySet().iterator(); iterator.hasNext(); ) { GroupExpression groupExpr = iterator.next().getKey(); if (groupExpr.getGroup() == 
group) { iterator.remove(); continue; } for (int i = 0; i < groupExpr.getInputs().size(); i++) { if (groupExpr.getInputs().get(i) == group) { groupExpr.getGroup().removeGroupExpression(groupExpr); iterator.remove(); break; } } } } private void deepSearchGroup(Group root, LinkedList<Integer> touch) { for (Group group : root.getFirstLogicalExpression().getInputs()) { touch.add(group.getId()); deepSearchGroup(group, touch); } } /** * When performing replaceRewriteExpression, some groups may not be reachable by rootGroup. * These groups should be replaced. * In order to reduce the number of groups entering Memo, * we will delete inaccessible groups in this function. */ public void removeUnreachableGroup() { LinkedList<Integer> touch = new LinkedList<>(); touch.add(rootGroup.getId()); deepSearchGroup(rootGroup, touch); List<Group> groupsCopy = new ArrayList<>(groups); for (Group group : groupsCopy) { if (!touch.contains(group.getId())) { removeOneGroup(group); } } } public void replaceRewriteExpression(Group targetGroup, OptExpression expression) { removeGroupInitLogicExpression(targetGroup); GroupExpression groupExpression = copyIn(targetGroup, expression).second; groupExpression.deriveLogicalPropertyItself(); } private void removeGroupInitLogicExpression(Group group) { GroupExpression initGroupExpression = group.getFirstLogicalExpression(); groupExpressions.remove(initGroupExpression); Preconditions.checkState(group.isValidInitState()); group.getLogicalExpressions().clear(); } public void deriveAllGroupLogicalProperty() { getRootGroup().getFirstLogicalExpression().deriveLogicalPropertyRecursively(); } }
class Memo { private static final Logger LOG = LogManager.getLogger(Memo.class); private int nextGroupId = 0; private final List<Group> groups; private Group rootGroup; /** * The map value is root group id for the GroupExpression. * We need to store group id because when {@see insertGroupExpression} * we need to get existed group id for tmp GroupExpression, * which doesn't have group id info */ private final Map<GroupExpression, GroupExpression> groupExpressions; public List<Group> getGroups() { return groups; } public Map<GroupExpression, GroupExpression> getGroupExpressions() { return groupExpressions; } public Memo() { groups = Lists.newLinkedList(); groupExpressions = Maps.newHashMap(); } public Group getRootGroup() { return rootGroup; } /** * Copy an expression into search space, this function will add an GroupExpression for * this Expression. If this Expression has children, this function will be called * recursively to create GroupExpression and Group for every single Expression * For example, Join(Scan(A), Scan(B)) will create 3 Groups and GroupExpressions for Join, * Scan(A) and Scan(B). 
* We return GroupExpression rather than Group because we can get Group from GroupExpression */ public GroupExpression init(OptExpression originExpression) { Preconditions.checkState(groups.size() == 0); Preconditions.checkState(groupExpressions.size() == 0); GroupExpression rootGroupExpression = copyIn(null, originExpression).second; rootGroup = rootGroupExpression.getGroup(); return rootGroupExpression; } public Pair<Boolean, GroupExpression> insertGroupExpression(GroupExpression groupExpression, Group targetGroup) { if (groupExpressions.get(groupExpression) != null) { GroupExpression existedGroupExpression = groupExpressions.get(groupExpression); Group existedGroup = existedGroupExpression.getGroup(); if (needMerge(targetGroup, existedGroup)) { mergeGroup(existedGroup, targetGroup); } return new Pair<>(false, existedGroupExpression); } if (targetGroup == null) { targetGroup = newGroup(); groups.add(targetGroup); } groupExpressions.put(groupExpression, groupExpression); targetGroup.addExpression(groupExpression); return new Pair<>(true, groupExpression); } /** * Insert an enforce expression into the target group. 
*/ public void insertEnforceExpression(GroupExpression groupExpression, Group targetGroup) { groupExpression.setGroup(targetGroup); } private Group newGroup() { return new Group(nextGroupId++); } public Pair<Boolean, GroupExpression> copyIn(Group targetGroup, OptExpression expression) { List<Group> inputs = Lists.newArrayList(); for (OptExpression input : expression.getInputs()) { Group group; if (input.getGroupExpression() != null) { group = input.getGroupExpression().getGroup(); } else { group = copyIn(null, input).second.getGroup(); } Preconditions.checkState(group != null); Preconditions.checkState(group != targetGroup); inputs.add(group); } GroupExpression groupExpression = new GroupExpression(expression.getOp(), inputs); Pair<Boolean, GroupExpression> result = insertGroupExpression(groupExpression, targetGroup); if (result.first && targetGroup == null) { Preconditions.checkState(result.second.getOp().isLogical()); result.second.deriveLogicalPropertyItself(); result.second.getGroup().setStatistics(expression.getStatistics()); } return result; } private boolean needMerge(Group targetGroup, Group existedGroup) { return targetGroup != null && targetGroup != existedGroup; } private List<Group> getAllEmptyGroups() { List<Group> groups = Lists.newArrayList(); for (Group group : getGroups()) { if (group.isEmpty()) { groups.add(group); continue; } for (Group childGroup : group.getFirstLogicalExpression().getInputs()) { if (childGroup.isEmpty()) { groups.add(childGroup); break; } } } return groups; } public void removeAllEmptyGroup() { List<Group> groups = getAllEmptyGroups(); while (!groups.isEmpty()) { for (Group group : groups) { removeOneGroup(group); } groups = getAllEmptyGroups(); } } private void removeOneGroup(Group group) { groups.remove(group); for (Iterator<Map.Entry<GroupExpression, GroupExpression>> iterator = groupExpressions.entrySet().iterator(); iterator.hasNext(); ) { GroupExpression groupExpr = iterator.next().getKey(); if (groupExpr.getGroup() == 
group) { iterator.remove(); continue; } for (int i = 0; i < groupExpr.getInputs().size(); i++) { if (groupExpr.getInputs().get(i) == group) { groupExpr.getGroup().removeGroupExpression(groupExpr); iterator.remove(); break; } } } } private void deepSearchGroup(Group root, LinkedList<Integer> touch) { for (Group group : root.getFirstLogicalExpression().getInputs()) { touch.add(group.getId()); deepSearchGroup(group, touch); } } /** * When performing replaceRewriteExpression, some groups may not be reachable by rootGroup. * These groups should be replaced. * In order to reduce the number of groups entering Memo, * we will delete inaccessible groups in this function. */ public void removeUnreachableGroup() { LinkedList<Integer> touch = new LinkedList<>(); touch.add(rootGroup.getId()); deepSearchGroup(rootGroup, touch); List<Group> groupsCopy = new ArrayList<>(groups); for (Group group : groupsCopy) { if (!touch.contains(group.getId())) { removeOneGroup(group); } } } public void replaceRewriteExpression(Group targetGroup, OptExpression expression) { removeGroupInitLogicExpression(targetGroup); GroupExpression groupExpression = copyIn(targetGroup, expression).second; groupExpression.deriveLogicalPropertyItself(); } private void removeGroupInitLogicExpression(Group group) { GroupExpression initGroupExpression = group.getFirstLogicalExpression(); groupExpressions.remove(initGroupExpression); Preconditions.checkState(group.isValidInitState()); group.getLogicalExpressions().clear(); } public void deriveAllGroupLogicalProperty() { getRootGroup().getFirstLogicalExpression().deriveLogicalPropertyRecursively(); } }
"Inconsistent state" may cause the reader to think the Vespa team messed up, whereas this may occur naturally through topology changes. Rephrase?
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment, String region, Optional<ClusterSpec.Id> clusterId) { ZoneId zone = ZoneId.from(environment, region); if (!zone.environment().isManuallyDeployed()) throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments"); ApplicationId applicationId = ApplicationId.from(tenant, application, instance); NodeFilter filters = NodeFilter.all() .states(Node.State.active) .applications(applicationId) .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, clusterId.map(filters::clusterIds).orElse(filters)); if (nodes.isEmpty()) { throw new NotExistsException("No content nodes found for %s%s in %s".formatted( applicationId.toFullString(), clusterId.map(id -> " cluster " + id).orElse(""), zone)); } Instant readiedAt = null; int numNoReport = 0, numInitial = 0, numDropped = 0, numReadied = 0, numStarted = 0; for (Node node : nodes) { Inspector report = Optional.ofNullable(node.reports().get("dropDocuments")) .map(json -> SlimeUtils.jsonToSlime(json).get()).orElse(null); if (report == null) numNoReport++; else if (report.field("startedAt").valid()) { numStarted++; readiedAt = SlimeUtils.instant(report.field("readiedAt")); } else if (report.field("readiedAt").valid()) numReadied++; else if (report.field("droppedAt").valid()) numDropped++; else numInitial++; } if ((numInitial > 0 && numNoReport > 0) || (numReadied > 0 && (numNoReport > 0 || numInitial > 0 || numDropped > 0)) || (numStarted > 0 && (numInitial > 0 || numDropped > 0))) return ErrorResponse.conflict("Inconsistent state, try restarting drop documents again"); Slime slime = new Slime(); Cursor root = slime.setObject(); if (numStarted + numNoReport == nodes.size()) { if (readiedAt != null) root.setLong("lastDropped", readiedAt.toEpochMilli()); } else { Cursor progress 
= root.setObject("progress"); progress.setLong("total", nodes.size()); progress.setLong("dropped", numDropped); progress.setLong("started", numStarted + numNoReport); } return new SlimeJsonResponse(slime); }
return ErrorResponse.conflict("Inconsistent state, try restarting drop documents again");
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment, String region, Optional<ClusterSpec.Id> clusterId) { ZoneId zone = ZoneId.from(environment, region); if (!zone.environment().isManuallyDeployed()) throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments"); ApplicationId applicationId = ApplicationId.from(tenant, application, instance); NodeFilter filters = NodeFilter.all() .states(Node.State.active) .applications(applicationId) .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, clusterId.map(filters::clusterIds).orElse(filters)); if (nodes.isEmpty()) { throw new NotExistsException("No content nodes found for %s%s in %s".formatted( applicationId.toFullString(), clusterId.map(id -> " cluster " + id).orElse(""), zone)); } Instant readiedAt = null; int numNoReport = 0, numInitial = 0, numDropped = 0, numReadied = 0, numStarted = 0; for (Node node : nodes) { Inspector report = Optional.ofNullable(node.reports().get("dropDocuments")) .map(json -> SlimeUtils.jsonToSlime(json).get()).orElse(null); if (report == null) numNoReport++; else if (report.field("startedAt").valid()) { numStarted++; readiedAt = SlimeUtils.instant(report.field("readiedAt")); } else if (report.field("readiedAt").valid()) numReadied++; else if (report.field("droppedAt").valid()) numDropped++; else numInitial++; } if (numInitial + numDropped > 0 && numNoReport + numReadied + numStarted > 0) return ErrorResponse.conflict("Last dropping of documents may have failed to clear all documents due " + "to concurrent topology changes, consider retrying"); Slime slime = new Slime(); Cursor root = slime.setObject(); if (numStarted + numNoReport == nodes.size()) { if (readiedAt != null) root.setLong("lastDropped", readiedAt.toEpochMilli()); } else { Cursor progress = 
root.setObject("progress"); progress.setLong("total", nodes.size()); progress.setLong("dropped", numDropped); progress.setLong("started", numStarted + numNoReport); } return new SlimeJsonResponse(slime); }
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e); default -> new 
ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), 
request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), 
Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), 
path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return 
ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return 
addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return 
suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), 
path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), 
path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), 
path.get("environment"), path.get("region"), request);
// Remaining instance-level routes of the dispatcher; falls through to 404 when nothing matches.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend"))
    return suspend(path.get("tenant"), path.get("application"), path.get("instance"),
                   path.get("environment"), path.get("region"), false);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override"))
    return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"),
                                     path.get("environment"), path.get("region"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support"))
    return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"),
                                 path.get("environment"), path.get("region"), request);
// Legacy route layout with the instance segment last.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}"))
    return deactivate(path.get("tenant"), path.get("application"), path.get("instance"),
                      path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
    return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"),
                                     path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}

// Advertises the HTTP methods this handler supports.
private HttpResponse handleOPTIONS() {
    EmptyResponse response = new EmptyResponse();
    response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
    return response;
}

// Lists every tenant together with its applications, for the recursive root listing.
private HttpResponse recursiveRoot(HttpRequest request) {
    Slime slime = new Slime();
    Cursor tenantArray = slime.setArray();
    List<Application> applications = controller.applications().asList();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
        toSlime(tenantArray.addObject(),
                tenant,
                applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(),
                request);
    return new SlimeJsonResponse(slime);
}

// Root resource: full recursive tenant listing when requested, otherwise just resource links.
private HttpResponse root(HttpRequest request) {
    return recurseOverTenants(request)
            ? recursiveRoot(request)
            : new ResourceResponse(request, "tenant");
}

// Lists all tenants (optionally including deleted ones).
private HttpResponse tenants(HttpRequest request) {
    Slime slime = new Slime();
    Cursor response = slime.setArray();
    for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
        tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
    return new SlimeJsonResponse(slime);
}

// Returns a single tenant, or 404 when it does not exist.
private HttpResponse tenant(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                     .map(tenant -> tenant(tenant, request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}

// Serializes one tenant together with its applications.
private HttpResponse tenant(Tenant tenant, HttpRequest request) {
    Slime slime = new Slime();
    toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
    return new SlimeJsonResponse(slime);
}

// Renders the pending access request and audit log for a cloud tenant.
private HttpResponse accessRequests(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var accessControlService = controller.serviceRegistry().accessControlService();
    var slime = new Slime();
    var cursor = slime.setObject();
    try {
        var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
        var managedAccess = accessControlService.getManagedAccess(tenant);
        cursor.setBool("managedAccess", managedAccess);
        accessRoleInformation.getPendingRequest()
                .ifPresent(membershipRequest -> {
                    var requestCursor = cursor.setObject("pendingRequest");
                    requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                    requestCursor.setString("reason", membershipRequest.getReason());
                });
        var auditLogCursor = cursor.setArray("auditLog");
        accessRoleInformation.getAuditLog()
                .forEach(auditLogEntry -> {
                    var entryCursor = auditLogCursor.addObject();
                    entryCursor.setString("created", auditLogEntry.getCreationTime());
                    entryCursor.setString("approver", auditLogEntry.getApprover());
                    entryCursor.setString("reason", auditLogEntry.getReason());
                    entryCursor.setString("status", auditLogEntry.getAction());
                });
    } catch (ZmsClientException e) {
        // A 404 from ZMS is treated as "access control not configured" rather than an error.
        if (e.getErrorCode() == 404) cursor.setBool("managedAccess", false);
    }
    return new SlimeJsonResponse(slime);
}

// Operator-only: files an ssh access request for a cloud tenant.
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if (!isOperator(request)) {
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    }
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
    return new MessageResponse("OK");
}

// Approves (or denies) a pending ssh access request for a cloud tenant.
// Expiry defaults to one day from now when not given in the request body.
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var inspector = toSlime(request.getData()).get();
    var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); try { controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes"); throw e; } } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support 
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT);
                // A changed address is stored unverified until the user confirms it.
                return new Email(address, false);
            })
            .orElse(info.contact().email());
    var mergedContact = TenantContact.empty()
            .withName(getString(inspector.field("contact").field("name"), info.contact().name()))
            .withEmail(mergedEmail);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
    var mergedInfo = info
            .withName(getString(inspector.field("tenant").field("company"), info.name()))
            .withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    validateMergedTenantInfo(mergedInfo);
    // Persist the merged info under the tenant lock.
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}

// Serializes the billing contact view of tenant info; empty info serializes to {}.
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    var info = cloudTenant.info();
    if (!info.isEmpty()) {
        var billingContact = info.billingContact();
        var contact = root.setObject("contact");
        contact.setString("name", billingContact.contact().name());
        contact.setString("email", billingContact.contact().email().getEmailAddress());
        contact.setString("phone", billingContact.contact().phone());
        toSlime(billingContact.address(), root);
    }
    return new SlimeJsonResponse(slime);
}

// Updates the billing contact of a cloud tenant from the request body.
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    var address = info.billingContact().address();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address());
    var mergedBilling = info.billingContact()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}

// Serializes the notification contacts of a cloud tenant.
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(cloudTenant.info().contacts(), root);
    return new SlimeJsonResponse(slime);
}

// Replaces the notification contacts of a cloud tenant from the request body.
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    var mergedInfo = cloudTenant.info()
            .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts()));
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}

// Validates merged tenant info: contact name and email are mandatory, email must contain '@',
// and a non-blank website must parse as a URL.
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    if (mergedInfo.contact().name().isBlank()) {
        throw new IllegalArgumentException("'contactName' cannot be empty");
    }
    if (mergedInfo.contact().email().getEmailAddress().isBlank()) {
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    }
    if (! mergedInfo.contact().email().getEmailAddress().contains("@")) {
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    }
    if (!
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield 
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) {
    // Merges the billing contact; billing emails are trusted (no verification round-trip).
    if (!insp.valid()) return oldContact;
    return TenantBilling.empty()
            .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}

// Replaces the notification contact list; addresses already present keep their existing
// (possibly verified) email, new addresses are stored unverified and trigger a verification mail.
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) {
    if (!insp.valid()) return oldContacts;
    List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
        String email = inspector.field("email").asString().trim();
        List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                .map(audience -> fromAudience(audience.asString()))
                .toList();
        return oldContacts.ofType(TenantContacts.EmailContact.class)
                .stream()
                .filter(contact -> contact.email().getEmailAddress().equals(email))
                .findAny()
                .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email()))
                .orElseGet(() -> {
                    controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS);
                    return new TenantContacts.EmailContact(audiences, new Email(email, false));
                });
    }).toList();
    return new TenantContacts(contacts);
}

// Lists notifications, optionally restricted to one tenant, filtered by request properties
// (application, instance, zone, job, type, level).
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
    boolean productionOnly = showOnlyProductionInstances(request);
    boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
    Slime slime = new Slime();
    Cursor notificationsArray = slime.setObject().setArray("notifications");
    tenant.map(t -> Stream.of(TenantName.from(t)))
          .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
          .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
          .filter(notification ->
                  propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                  propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                  propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                  propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                  propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                  propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
          .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
    return new SlimeJsonResponse(slime);
}

// True when the request property is absent, or present and equal to the given value.
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    return Optional.ofNullable(request.getProperty(property))
            .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get()))
            .orElse(true);
}

// Serializes one notification; optional source fields are emitted only when present.
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
    cursor.setLong("at", notification.at().toEpochMilli());
    cursor.setString("level", notificationLevelAsString(notification.level()));
    cursor.setString("type", notificationTypeAsString(notification.type()));
    if (!excludeMessages) {
        Cursor messagesArray = cursor.setArray("messages");
        notification.messages().forEach(messagesArray::addString);
    }
    if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
    notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
    notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
    notification.source().zoneId().ifPresent(zoneId -> {
        cursor.setString("environment", zoneId.environment().value());
        cursor.setString("region", zoneId.region().value());
    });
    notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
    notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
    notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}

// Maps a notification type to its wire name; submission and applicationPackage share one name.
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage: yield "applicationPackage";
        case testPackage: yield "testPackage";
        case deployment: yield "deployment";
        case feedBlock: yield "feedBlock";
        case reindex: yield "reindex";
    };
}

// Maps a notification level to its wire name.
private static String notificationLevelAsString(Notification.Level level) {
    return switch (level) {
        case info: yield "info";
        case warning: yield "warning";
        case error: yield "error";
    };
}

// Lists all applications of a tenant, or just the named one.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    getTenantOrThrow(tenantName); // fail fast when the tenant does not exist
    List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
            controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                      .map(List::of)
                      .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (Application application : applications) {
        Cursor applicationObject = applicationArray.addObject();
        applicationObject.setString("tenant", application.id().tenant().value());
        applicationObject.setString("application", application.id().application().value());
        applicationObject.setString("url", withPath("/application/v4" +
                                                    "/tenant/" + application.id().tenant().value() +
                                                    "/application/" + application.id().application().value(),
                                                    request.getUri()).toString());
        Cursor instanceArray = applicationObject.setArray("instances");
        // Optionally restrict the instance list to production instances.
        for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                          : application.instances().keySet()) {
            Cursor instanceObject = instanceArray.addObject();
            instanceObject.setString("instance", instance.value());
            instanceObject.setString("url", withPath("/application/v4" +
                                                     "/tenant/" + application.id().tenant().value() +
                                                     "/application/" + application.id().application().value() +
                                                     "/instance/" + instance.value(),
                                                     request.getUri()).toString());
        }
    }
    return new SlimeJsonResponse(slime);
}

// Returns the application package targeted by the latest run of the given job, as a zip.
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // NOTE(review): the unchecked .get() assumes at least one run of this job exists — confirm callers guarantee this.
    RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}

// Returns the diff for the dev package of the given run, or 404 when none is stored.
private HttpResponse devApplicationPackageDiff(RunId runId) {
    DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
    return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
            .map(ByteArrayResponse::new)
            .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
}

// Returns an application (or tester) package as a zip, selected by the 'build' property:
// an explicit build number, "latestDeployed", or by default the latest submitted build.
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
    final long build;
    String requestedBuild = request.getProperty("build");
    if (requestedBuild != null) {
        if (requestedBuild.equals("latestDeployed")) {
            build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                              .map(RevisionId::number)
                              .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
        } else {
            try {
                build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
            }
        }
    } else {
        // No build given: serve the latest submitted build.
        build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                          .map(version -> version.id().number())
                          .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
    }
    RevisionId revision = RevisionId.forProduction(build);
    boolean tests = request.getBooleanProperty("tests");
    byte[] applicationPackage = tests ?
            controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}

// Returns the diff between the given submitted build and the previous one.
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
            .map(ByteArrayResponse::new)
            .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}

// Returns a single application.
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}

// Returns the Vespa version to compile against, optionally restricted to a major version.
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    Slime slime = new Slime();
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}

// Returns a single instance together with its deployment status.
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new
Slime();
    toSlime(slime.setObject(),
            getInstance(tenantName, applicationName, instanceName),
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)),
            request);
    return new SlimeJsonResponse(slime);
}

// Registers a developer public key for the calling user on a cloud tenant; returns all keys.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}

// Asks the config server to validate a configured secret store against a concrete deployment.
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    if (!applicationId.tenant().equals(TenantName.from(tenantName)))
        return ErrorResponse.badRequest("Invalid application id");
    var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        // Wrap the config server's raw JSON answer under a "result" object, tagged with the target deployment.
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        return ErrorResponses.logThrowing(request, log, e);
    }
}

// Removes a developer public key from a cloud tenant; returns the remaining keys.
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // NOTE(review): 'user' is looked up but never used below — confirm whether this is intentional.
    Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}

// Serializes a key-to-principal map as a list of {key, user} objects.
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
    keys.forEach((key, principal) -> {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(key));
        keyObject.setString("user", principal.getName());
    });
}

// Adds a deploy key to an application; returns all deploy keys.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

// Removes a deploy key from an application; returns the remaining deploy keys.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

// Configures a new AWS secret store for a cloud tenant.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role",
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); } private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); } private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + 
// (continuation of allowGcpArchiveAccess) Validates the member, stores it on the tenant's
// archive access settings under the tenant lock, and reports the change.
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); }
// Clears the GCP archive-access member from a cloud tenant's archive access settings.
private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); }
// Applies partial updates to application-level fields from the request body:
// "majorVersion" (a value of 0 clears the pinned major) and "pemDeployKey" (added to the key
// set). Returns a human-readable summary of the changes actually applied.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ?
 "empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); }
// Looks up an application by tenant and application name, throwing NotExistsException when absent.
private Application getApplication(String tenantName, String applicationName) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); return controller.applications().getApplication(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); }
// Looks up an instance by tenant, application and instance name, throwing NotExistsException when absent.
private Instance getInstance(String tenantName, String applicationName, String instanceName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); return controller.applications().getInstance(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); }
// Lists the node repository's nodes for the given deployment and serializes per-node state
// (hostname, state, orchestration, versions, resources, cluster membership, retirement and
// restart/reboot pending flags) as {"nodes": [...]}.
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id)); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString());
 node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor)); toSlime(node.resources(), nodeObject); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); nodeObject.setBool("down", node.down()); nodeObject.setBool("retired", node.retired() || node.wantToRetire()); nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration()); nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration()); nodeObject.setString("group", node.group()); nodeObject.setLong("index", node.index()); } return new SlimeJsonResponse(slime); }
// Serializes per-cluster autoscaling state (min/max/current/target/suggested resources,
// optional group size, scaling events and scaling duration) for the deployment as
// {"clusters": {clusterId: {...}}}.
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id); Slime slime = new Slime(); Cursor clustersObject = slime.setObject().setObject("clusters"); for (Cluster cluster : application.clusters().values()) { Cursor clusterObject = clustersObject.setObject(cluster.id().value()); clusterObject.setString("type", cluster.type().name()); toSlime(cluster.min(), clusterObject.setObject("min")); toSlime(cluster.max(), clusterObject.setObject("max")); if ( !
 cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize")); toSlime(cluster.current(), clusterObject.setObject("current")); toSlime(cluster.target(), clusterObject.setObject("target")); toSlime(cluster.suggested(), clusterObject.setObject("suggested")); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents")); clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis()); } return new SlimeJsonResponse(slime); }
// Maps a node state to its wire name; throws on unexpected values so new enum constants are
// noticed rather than silently serialized.
private static String valueOf(Node.State state) { return switch (state) { case failed: yield "failed"; case parked: yield "parked"; case dirty: yield "dirty"; case ready: yield "ready"; case active: yield "active"; case inactive: yield "inactive"; case reserved: yield "reserved"; case provisioned: yield "provisioned"; case breakfixed: yield "breakfixed"; case deprovisioned: yield "deprovisioned"; default: throw new IllegalArgumentException("Unexpected node state '" + state + "'."); }; }
// Maps an orchestration service state to its wire name; 'unknown' (and any unmatched value)
// falls through to "unknown".
static String valueOf(Node.ServiceState state) { switch (state) { case expectedUp: return "expectedUp"; case allowedDown: return "allowedDown"; case permanentlyDown: return "permanentlyDown"; case unorchestrated: return "unorchestrated"; case unknown: break; } return "unknown"; }
// Maps a node cluster type to its wire name; 'unknown' is treated as an error here, unlike
// ServiceState above.
private static String valueOf(Node.ClusterType type) { return switch (type) { case admin: yield "admin"; case content: yield "content"; case container: yield "container"; case combined: yield "combined"; case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); }; }
// Maps a disk speed to its wire name.
private static String valueOf(NodeResources.DiskSpeed diskSpeed) { return switch (diskSpeed) { case fast : yield "fast"; case slow : yield "slow"; case any : yield "any"; }; }
// Maps a storage type to its wire name.
private static String valueOf(NodeResources.StorageType storageType) { return switch (storageType) { case remote : yield "remote"; case local : yield "local"; case any : yield "any"; }; }
// Streams deployment logs from the config server through to the client (continues on the next
// chunk line).
private HttpResponse logs(String tenantName, String
// (continuation of logs) Fetches the log stream for the deployment, forwarding the raw query
// parameters to the config server, and returns a response that pipes the stream to the client;
// maxPendingBytes allows up to 64 MiB (1 << 26) of buffered response data.
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { try (logStream) { logStream.transferTo(outputStream); } } @Override public long maxPendingBytes() { return 1 << 26; } }; }
// Returns the current support-access state for the deployment.
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant())); }
// Grants support access to the deployment for 7 days from now, attributed to the requesting
// user, and returns the updated state.
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); Instant now = controller.clock().instant(); SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now)); }
// Revokes support access for the deployment and re-triggers (or queues) its deployment job so
// the revocation takes effect; returns the updated state.
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new
 DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName()); controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant())); }
// Fetches search-node (proton) metrics for the deployment from the config server and renders
// them as JSON.
private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment); return buildResponseFromSearchNodeMetrics(searchNodeMetrics); }
// Returns scaling events for the deployment within the requested window, keyed by cluster id.
// "from"/"until" are epoch-second query parameters defaulting to EPOCH and now, respectively.
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { var from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.EPOCH); var until = Optional.ofNullable(request.getProperty("until")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.now(controller.clock())); var application = ApplicationId.from(tenantName, applicationName, instanceName); var zone = requireZone(environment, region); var deployment = new DeploymentId(application, zone); var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment); var slime = new Slime(); var root = slime.setObject(); for (var entry : events.entrySet()) { var serviceRoot = root.setArray(entry.getKey().clusterId().value());
 scalingEventsToSlime(entry.getValue(), serviceRoot); } return new SlimeJsonResponse(slime); }
// Wraps the given search-node metrics in a {"metrics": [...]} JSON document; logs and returns
// an empty 500 response if Jackson serialization fails.
private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) { try { var jsonObject = jsonMapper.createObjectNode(); var jsonArray = jsonMapper.createArrayNode(); for (SearchNodeMetrics metrics : searchnodeMetrics) { jsonArray.add(metrics.toJson()); } jsonObject.set("metrics", jsonArray); return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject)); } catch (JsonProcessingException e) { log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e); return new JsonResponse(500, ""); } }
// Triggers (or re-triggers, with "reTrigger") the given job, honoring the "skipTests",
// "skipRevision" and "skipUpgrade" request flags, and reports which jobs were triggered and
// which upgrades were suppressed.
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); boolean upgradeRevision = ! requestObject.field("skipRevision").asBool(); boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool(); String triggered = reTrigger ? controller.applications().deploymentTrigger() .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform) .stream().map(job -> job.type().jobName()).collect(joining(", ")); String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") + (upgradeRevision ? "" : "revision") + ( ! upgradeRevision && ! upgradePlatform ? " and " : "") + (upgradePlatform ? "" : "platform") + ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : ""); return new MessageResponse(triggered.isEmpty() ?
 "Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); }
// Pauses the given job for the maximum allowed pause duration.
private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); }
// Resumes a previously paused job.
private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); }
// Re-sends a pending verification mail of the given type ("contact" or "notifications") to the
// given address (result rendering continues on the next chunk line).
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) { var mail = mandatory("mail", inspector).asString(); var type = mandatory("mailType", inspector).asString(); var mailType = switch (type) { case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT; case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS; default -> throw new IllegalArgumentException("Unknown mail type " + type); }; var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType); return pendingVerification.isPresent() ?
// (continuation of resendEmailVerification) Success when a pending verification existed for the
// address, 404 otherwise.
new MessageResponse("Re-sent verification mail to " + mail) : ErrorResponse.notFoundError("No pending mail verification found for " + mail); }
// Serializes an application overview: ids, deployment-status links, latest revision, project
// id, in-flight and outstanding changes (from the first instance), pinned major version, the
// instance list (production-only when requested), deploy keys, service-quality metrics,
// activity timestamps/rates, and ownership/deployment issue references.
private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ?
 application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); }
// Serializes one instance for the application overview: in-flight and outstanding changes,
// change blockers (with their time windows), rotation id, and its deployments — either
// recursively expanded or as environment/region/url references.
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( !
 status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && !
 instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } }
// Adds the instance's first assigned rotation id to the object, when one exists.
private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); }
// Serializes a single instance view (ids, links, revision/commit info, changes, change
// blockers, rotations, deployments and application-level metrics/activity); body continues on
// the following chunk lines.
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( !
// (continuation of toSlime(Cursor, Instance, DeploymentStatus, HttpRequest)) Emits changes,
// change blockers, major version, rotation id, then each deployment — with rotation status for
// prod deployments, recursively expanded or referenced by environment/region/url — followed by
// zones with planned-but-missing deployments, deploy keys, metrics, activity and issue ids.
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && !
 instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> !
 instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); }
// Returns the serialized details of a single deployment; NotExistsException when the instance
// or the deployment in the requested zone does not exist.
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
 Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); }
// Serializes a change: its platform version and/or its application revision.
private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); }
// Serializes a single endpoint: cluster, TLS flag, URL, scope, routing method and legacy flag.
private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); }
// Serializes the full deployment detail view (ids, zone, endpoints, links, versions, timing,
// enclave account, rotation/job status, quota/cost, archive URI, activity and metrics); legacy
// and direct endpoints are filtered out unless "includeLegacyEndpoints" is set. Body continues
// on the following chunk lines.
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); addAvailabilityZone(response, deployment.zone()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints =
zoneEndpoints.not().legacy().direct(); } for (var endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> { Cursor enclave = response.setObject("enclave"); enclave.setString("cloudAccount", cloudAccount.value()); controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> 
enclave.setString("athensDomain", domain.value())); }); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant())) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ? 
    /** Writes the BCP status (the rotation status string) of the given rotation state to the given object. */
    private void toSlime(RotationState state, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        bcpStatus.setString("rotationStatus", rotationStateString(state));
    }
    /** Returns the URI of the monitoring system page (exposed as "yamasUrl" in responses) for the given deployment. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }
"in" : "out of")); } private String serviceTypeIn(DeploymentId id) { CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName(); if (CloudName.AWS.equals(cloud)) return "aws-private-link"; if (CloudName.GCP.equals(cloud)) return "gcp-service-connect"; return "unknown"; } private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId()); Slime slime = new Slime(); Cursor lbArray = slime.setObject().setArray("privateServices"); for (LoadBalancer lb : lbs) { Cursor serviceObject = lbArray.addObject(); serviceObject.setString("cluster", lb.cluster().value()); lb.service().ifPresent(service -> { serviceObject.setString("serviceId", service.id()); serviceObject.setString("type", serviceTypeIn(id)); Cursor urnsArray = serviceObject.setArray("allowedUrns"); for (AllowedUrn urn : service.allowedUrns()) { Cursor urnObject = urnsArray.addObject(); urnObject.setString("type", switch (urn.type()) { case awsPrivateLink -> "aws-private-link"; case gcpServiceConnect -> "gcp-service-connect"; }); urnObject.setString("urn", urn.urn()); } Cursor endpointsArray = serviceObject.setArray("endpoints"); controller.serviceRegistry().vpcEndpointService() .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount()) .forEach(endpoint -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("endpointId", endpoint.endpointId()); endpointObject.setString("state", endpoint.stateValue().name()); endpointObject.setString("detail", endpoint.stateString()); }); }); } return new SlimeJsonResponse(slime); } private HttpResponse dropDocuments(String tenant, String application, String instance, String 
environment, String region, Optional<ClusterSpec.Id> clusterId) { ZoneId zone = ZoneId.from(environment, region); if (!zone.environment().isManuallyDeployed()) throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments"); ApplicationId applicationId = ApplicationId.from(tenant, application, instance); controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId); return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() + clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = 
controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); Slime slime = new Slime(); Cursor root = slime.setObject(); if ( ! instance.change().isEmpty()) { instance.change().platform().ifPresent(version -> root.setString("platform", version.toString())); instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString())); root.setBool("pinned", instance.change().isPlatformPinned()); root.setBool("platform-pinned", instance.change().isPlatformPinned()); root.setBool("application-pinned", instance.change().isRevisionPinned()); } return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, 
HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); } private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { 
getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), new Email(user.email(), true))); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId 
    /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        // Mutate under the application lock so the change is applied against a consistent view.
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // An empty version means "deploy the current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            // Only operators may force a version that is not active in this system.
            if ( ! versionStatus.isActive(version) && ! isOperator(request))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPlatformPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }
*/ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Inspector buildField = toSlime(request.getData()).get().field("build"); long build = buildField.valid() ? buildField.asLong() : -1; StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ? application.get().revisions().last().get().id() : getRevision(application.get(), build); Change change = Change.of(revision); if (pin) change = change.withRevisionPin(); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } private RevisionId getRevision(Application application, long build) { return application.revisions().withPackage().stream() .map(ApplicationVersion::id) .filter(version -> version.number() == build) .findFirst() .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build)) .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); } private HttpResponse cancelBuild(String tenantName, String applicationName, String build){ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); RevisionId revision = RevisionId.forProduction(Long.parseLong(build)); controller.applications().lockApplicationOrThrow(id, application -> { controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))); for (Instance instance : application.get().instances().values()) if (instance.change().revision().equals(Optional.of(revision))) 
controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! 
type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName()); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone. */ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = 
    /** Serializes one reindexing status entry: timestamps, state, message, progress, speed and cause, each only when present. */
    void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
        status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
        status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
        status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
        status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
        status.message().ifPresent(message -> statusObject.setString("message", message));
        status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
        status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
        status.cause().ifPresent(cause -> statusObject.setString("cause", cause));
    }
*/ private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().disableReindexing(id, zone); return new MessageResponse("Disabled reindexing of " + id + " in " + zone); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); RestartFilter restartFilter = new RestartFilter() .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of)) .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from)) .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); controller.applications().restart(deploymentId, restartFilter); return new MessageResponse("Requested restart of " + deploymentId); } /** Set suspension status of the given deployment. */ private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().setSuspension(deploymentId, suspend); return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId); } private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { if ( ! type.environment().isManuallyDeployed() && ! 
isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request)); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if ( ! vespaVersion.isEmpty()) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } DeploymentResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber()); Cursor logArray = root.setArray("prepareMessages"); for (LogEntry logMessage : result.log()) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.epochMillis()); logObject.setString("level", logMessage.level().getName()); logObject.setString("message", logMessage.message()); } return new SlimeJsonResponse(slime); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && ! 
isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! 
run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! 
    /**
     * Requests a service dump on the given node by writing a "serviceDump" report to the node repository.
     * Rejects the request if a dump is already in progress (unless the "force" property is set), and
     * validates that the request payload contains a configId and a non-empty artifacts list.
     * If the "wait" property is set, blocks until the dump completes or fails.
     */
    private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                            String region, String hostname, HttpRequest request) {
        NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
        ZoneId zone = requireZone(environment, region);
        // An existing report with neither failedAt nor completedAt set means a dump is still in progress.
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
        if (report != null) {
            Cursor cursor = report.get();
            boolean force = request.getBooleanProperty("force");
            if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
                throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
            }
        }
        // Parse and validate the request payload.
        Slime requestPayload;
        try {
            requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
        } catch (Exception e) {
            throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
        }
        Cursor requestPayloadCursor = requestPayload.get();
        String configId = requestPayloadCursor.field("configId").asString();
        long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
        if (configId.isEmpty()) {
            throw new IllegalArgumentException("Missing configId");
        }
        Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
        int artifactEntries = artifactsCursor.entries();
        if (artifactEntries == 0) {
            throw new IllegalArgumentException("Missing or empty 'artifacts'");
        }
        // Build the dump request document stored as the node's "serviceDump" report.
        Slime dumpRequest = new Slime();
        Cursor dumpRequestCursor = dumpRequest.setObject();
        dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
        dumpRequestCursor.setString("configId", configId);
        Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
        for (int i = 0; i < artifactEntries; i++) {
            dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
        }
        if (expiresAt > 0) {
            dumpRequestCursor.setLong("expiresAt", expiresAt);
        }
        // Optional dump options are copied through verbatim.
        Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
        if (dumpOptionsCursor.children() > 0) {
            SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
        }
        // NOTE(review): new String(bytes) uses the platform default charset; JSON bytes are presumably UTF-8 —
        // consider new String(bytes, StandardCharsets.UTF_8). Confirm before changing.
        var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
        nodeRepository.updateReports(zone, hostname, reportsUpdate);
        boolean wait = request.getBooleanProperty("wait");
        if (!wait) return new MessageResponse("Request created");
        return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
    }
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
// Handler for the /application/v4 REST API: tenant, application, instance and deployment
// operations. All requests are audit-logged via AuditLoggingRequestHandler.
class ApplicationApiHandler extends AuditLoggingRequestHandler {

    private static final ObjectMapper jsonMapper = new ObjectMapper();

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    // Generous timeout: some operations (e.g. deployments) are long-running.
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    /**
     * Dispatches on HTTP method and translates known exception types to the
     * corresponding HTTP error responses; unexpected RuntimeExceptions are logged
     * and returned as internal server errors.
     */
    @Override
    public HttpResponse auditAndHandle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri());
            return switch (request.getMethod()) {
                case GET: yield handleGET(path, request);
                case PUT: yield handlePUT(path, request);
                case POST: yield handlePOST(path, request);
                case PATCH: yield handlePATCH(path, request);
                case DELETE: yield handleDELETE(path, request);
                case OPTIONS: yield handleOPTIONS();
                default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            };
        }
        catch (RestApiException.Forbidden e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (RestApiException.Unauthorized e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Map config server error codes onto HTTP statuses; anything unrecognized becomes 400.
            return switch (e.code()) {
                case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
                default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
            };
        }
        catch (RuntimeException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    /**
     * GET routing table. Routes are matched in declaration order; the first match wins.
     * (Method continues past this chunk boundary.)
     */
    private HttpResponse handleGET(Path path, HttpRequest request) {
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"),
request.getProperty("allowMajor"));
        // Application-level resources.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        // Instance-level resources.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        // Job (CI) resources.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
        // Deployment (zone) resources, instance-before-zone path form.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        // Both /metrics and /metrics/searchnode route to the same handler.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        // Zone-before-instance path form (legacy ordering of path segments).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // NOTE(review): the route below is an exact duplicate of the one above; since the first
        // match returns, this second check is unreachable dead code and can be removed.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PUT routing table: tenant updates, access management, info, archive access, secret stores, rotation overrides. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"),
path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** POST routing table: creation, deployment triggering, job control and zone-level actions. */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        // 'pin' is a legacy alias of 'platform-pin'; both pin the platform version.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        // Zone-level actions, instance-before-zone path form.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        // Zone-level actions, zone-before-instance (legacy) path form.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"),
path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PATCH routing table: both paths patch the application (the instance segment is ignored). */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** DELETE routing table: removal of tenants, keys, applications, instances, deployments and overrides. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
        // DELETE on /pause resumes a paused job.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the supported methods for CORS/preflight purposes. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /**
     * Builds the recursive root listing: every tenant with its applications.
     * (Method continues past this chunk boundary.)
     */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications =
controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            toSlime(tenantArray.addObject(),
                    tenant,
                    applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(),
                    request);
        return new SlimeJsonResponse(slime);
    }

    /** Root listing: recursive (tenants with applications) when requested, otherwise just resource links. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
                ? recursiveRoot(request)
                : new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants (optionally including deleted ones). */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Returns a single tenant, or 404 when it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Serializes the given tenant and its applications to a JSON response. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns operator access-request status and audit log for a cloud tenant.
     * A 404 from ZMS is treated as "no managed access configured".
     */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");

        var accessControlService = controller.serviceRegistry().accessControlService();
        var slime = new Slime();
        var cursor = slime.setObject();
        try {
            var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
            var managedAccess = accessControlService.getManagedAccess(tenant);
            cursor.setBool("managedAccess", managedAccess);
            accessRoleInformation.getPendingRequest()
                                 .ifPresent(membershipRequest -> {
                                     var requestCursor = cursor.setObject("pendingRequest");
                                     requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                                     requestCursor.setString("reason", membershipRequest.getReason());
                                 });
            var auditLogCursor = cursor.setArray("auditLog");
            accessRoleInformation.getAuditLog()
                                 .forEach(auditLogEntry -> {
                                     var entryCursor = auditLogCursor.addObject();
                                     entryCursor.setString("created", auditLogEntry.getCreationTime());
                                     entryCursor.setString("approver", auditLogEntry.getApprover());
                                     entryCursor.setString("reason", auditLogEntry.getReason());
                                     entryCursor.setString("status", auditLogEntry.getAction());
                                 });
        } catch (ZmsClientException e) {
            if (e.getErrorCode() == 404)
                cursor.setBool("managedAccess", false);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Requests SSH access for a cloud tenant; restricted to operators. */
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    /**
     * Approves or denies a pending SSH access request for a cloud tenant.
     * Expiry defaults to one day from now when not given in the request body.
     */
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        var expiry = inspector.field("expiry").valid() ? Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS);
        var approve = inspector.field("approve").asBool();
        controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
        return new MessageResponse("OK");
    }

    private HttpResponse addManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, true);
    }

    private HttpResponse removeManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, false);
    }

    /**
     * Toggles managed access for a cloud tenant; a 404 from ZMS means the
     * configuration is not yet ready and is surfaced as a 409 conflict.
     */
    private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            // NOTE(review): "privel" looks like a typo for "privileges" in this user-facing message.
            return ErrorResponse.badRequest("Can only set access privel for cloud tenants");
        try {
            controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
            var slime = new Slime();
            slime.setObject().setBool("managedAccess", managedAccess);
            return new SlimeJsonResponse(slime);
        } catch (ZmsClientException e) {
            if (e.getErrorCode() == 404)
                return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes");
            throw e;
        }
    }

    /** Returns tenant info for a cloud tenant, or 404 for other tenant types. */
    private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Applies the given handler to the tenant when it is a cloud tenant; 404 otherwise. (Truncated at chunk boundary.) */
    private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> handler.apply((CloudTenant) tenant))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support 
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield 
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true)) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); return oldContacts.ofType(TenantContacts.EmailContact.class) .stream() .filter(contact -> contact.email().getEmailAddress().equals(email)) .findAny() .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email())) .orElseGet(() -> { controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS); return new TenantContacts.EmailContact(audiences, new Email(email, false)); }); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, 
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var 
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private void toSlime(Cursor keysArray, Map<PublicKey, ? 
extends Principal> keys) { keys.forEach((key, principal) -> { Cursor keyObject = keysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", principal.getName()); }); } private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withoutDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var awsId = mandatory("awsId", data).asString(); var externalId = mandatory("externalId", data).asString(); var role = mandatory("role", 
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    // Re-read the tenant so the response reflects the stored state.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

/** Sets the AWS role granted access to this tenant's archive. Cloud tenants only. */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var role = mandatory("role", data).asString();
    if (role.isBlank()) {
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}

/** Removes the AWS archive access role from the tenant. Cloud tenants only. */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}

/** Sets the GCP member granted access to this tenant's archive. Cloud tenants only. */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" +
tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}

/** Removes the GCP archive access member from the tenant. Cloud tenants only. */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}

/**
 * Applies a partial update (major version and/or deploy key) to the application.
 * Returns a message describing the changes made, or "No applicable changes." if none applied.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A majorVersion of 0 clears the setting (stored as null).
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ?
"empty" : majorVersion));
        }

        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }

        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}

/** Returns the application with the given tenant and application name, or throws NotExistsException. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Returns the instance with the given tenant, application and instance name, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Lists the nodes allocated to the given deployment, with state, orchestration and resource details. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        // Reported retired both when the node is retired and when it merely wants to retire.
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // Restart/reboot are pending while the wanted generation is ahead of the current one.
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}

/**
 * Returns the cluster configuration (min/max/current/target/suggested resources, group size
 * and scaling events) for each cluster of the given deployment.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        if ( !
cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        toSlime(cluster.target(), clusterObject.setObject("target"));
        toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns the API string for a node state; throws on states not exposed by this API. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed: yield "failed";
        case parked: yield "parked";
        case dirty: yield "dirty";
        case ready: yield "ready";
        case active: yield "active";
        case inactive: yield "inactive";
        case reserved: yield "reserved";
        case provisioned: yield "provisioned";
        case breakfixed: yield "breakfixed";
        case deprovisioned: yield "deprovisioned";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}

/** Returns the API string for an orchestration (service) state; unrecognized states map to "unknown". */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: return "expectedUp";
        case allowedDown: return "allowedDown";
        case permanentlyDown: return "permanentlyDown";
        case unorchestrated: return "unorchestrated";
        case unknown: break;
    }
    return "unknown";
}

/** Returns the API string for a cluster type; the unknown type is rejected. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin: yield "admin";
        case content: yield "content";
        case container: yield "container";
        case combined: yield "combined";
        case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}

/** Returns the API string for a disk speed. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast : yield "fast";
        case slow : yield "slow";
        case any : yield "any";
    };
}

/** Returns the API string for a storage type. */
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote : yield "remote";
        case local : yield "local";
        case any : yield "any";
    };
}

/** Streams log data for the given deployment, filtered by the request's query parameters. */
private HttpResponse logs(String tenantName, String
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Pipe the config server's log stream straight to the client, closing it when done.
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }

        @Override
        public long maxPendingBytes() {
            return 1 << 26; // 64 MiB of buffered response data
        }
    };
}

/** Returns the current support access state for the given deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}

/** Allows support access to the given deployment for 7 days, attributed to the requesting user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}

/** Disallows support access to the given deployment and re-triggers it so the change takes effect. */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new
DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}

/** Returns search node metrics for the given deployment as JSON. */
private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment);
    return buildResponseFromSearchNodeMetrics(searchNodeMetrics);
}

/**
 * Returns scaling events per cluster for the given deployment, bounded by the optional
 * "from" and "until" request properties (epoch seconds; defaults to [EPOCH, now]).
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var from = Optional.ofNullable(request.getProperty("from"))
                       .map(Long::valueOf)
                       .map(Instant::ofEpochSecond)
                       .orElse(Instant.EPOCH);
    var until = Optional.ofNullable(request.getProperty("until"))
                        .map(Long::valueOf)
                        .map(Instant::ofEpochSecond)
                        .orElse(Instant.now(controller.clock()));
    var application = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    var deployment = new DeploymentId(application, zone);
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    var slime = new Slime();
    var root = slime.setObject();
    for (var entry : events.entrySet()) {
        var serviceRoot = root.setArray(entry.getKey().clusterId().value());
scalingEventsToSlime(entry.getValue(), serviceRoot);
    }
    return new SlimeJsonResponse(slime);
}

/**
 * Builds a JSON response holding a "metrics" array from the given search node metrics.
 * Returns 500 with an empty body if serialization fails.
 */
private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
    try {
        var jsonObject = jsonMapper.createObjectNode();
        var jsonArray = jsonMapper.createArrayNode();
        for (SearchNodeMetrics metrics : searchnodeMetrics) {
            jsonArray.add(metrics.toJson());
        }
        jsonObject.set("metrics", jsonArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}

/**
 * Triggers the given job for the given application instance. The request body may set the
 * "reTrigger", "skipTests", "skipRevision" and "skipUpgrade" flags.
 * Returns a message naming the job(s) triggered, or that nothing was triggered.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));

    // Describe which of the revision/platform upgrades were suppressed, for the response message.
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ?
"Job " + type.jobName() + " for " + id + " not triggered"
                                                : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}

/** Pauses the given job for the application for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}

/** Resumes a previously paused job for the application. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}

/**
 * Re-sends a pending mail verification of type "contact" or "notifications" for the tenant.
 * Returns 404 if no verification is pending for the given address.
 */
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
    var mail = mandatory("mail", inspector).asString();
    var type = mandatory("mailType", inspector).asString();
    var mailType = switch (type) {
        case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT;
        case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown mail type " + type);
    };
    var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType);
    return pendingVerification.isPresent() ?
new MessageResponse("Re-sent verification mail to " + mail) : ErrorResponse.notFoundError("No pending mail verification found for " + mail); } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( ! 
status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && ! 
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } } private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); } private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! 
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && ! 
instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> ! 
instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region)); 
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); } private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); addAvailabilityZone(response, deployment.zone()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints = 
zoneEndpoints.not().legacy().direct(); } for (var endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> { Cursor enclave = response.setObject("enclave"); enclave.setString("cloudAccount", cloudAccount.value()); controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> 
enclave.setString("athensDomain", domain.value())); }); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant())) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ? 
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) : controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString())); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); } private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); 
statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } DeploymentId deploymentId = new DeploymentId(instance.id(), zone); RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; controller.routing().of(deploymentId).setRoutingStatus(status, agent); return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ? 
"in" : "out of")); } private String serviceTypeIn(DeploymentId id) { CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName(); if (CloudName.AWS.equals(cloud)) return "aws-private-link"; if (CloudName.GCP.equals(cloud)) return "gcp-service-connect"; return "unknown"; } private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId()); Slime slime = new Slime(); Cursor lbArray = slime.setObject().setArray("privateServices"); for (LoadBalancer lb : lbs) { Cursor serviceObject = lbArray.addObject(); serviceObject.setString("cluster", lb.cluster().value()); lb.service().ifPresent(service -> { serviceObject.setString("serviceId", service.id()); serviceObject.setString("type", serviceTypeIn(id)); Cursor urnsArray = serviceObject.setArray("allowedUrns"); for (AllowedUrn urn : service.allowedUrns()) { Cursor urnObject = urnsArray.addObject(); urnObject.setString("type", switch (urn.type()) { case awsPrivateLink -> "aws-private-link"; case gcpServiceConnect -> "gcp-service-connect"; }); urnObject.setString("urn", urn.urn()); } Cursor endpointsArray = serviceObject.setArray("endpoints"); controller.serviceRegistry().vpcEndpointService() .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount()) .forEach(endpoint -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("endpointId", endpoint.endpointId()); endpointObject.setString("state", endpoint.stateValue().name()); endpointObject.setString("detail", endpoint.stateString()); }); }); } return new SlimeJsonResponse(slime); } private HttpResponse dropDocuments(String tenant, String application, String instance, String 
environment, String region, Optional<ClusterSpec.Id> clusterId) { ZoneId zone = ZoneId.from(environment, region); if (!zone.environment().isManuallyDeployed()) throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments"); ApplicationId applicationId = ApplicationId.from(tenant, application, instance); controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId); return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() + clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = 
controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); Slime slime = new Slime(); Cursor root = slime.setObject(); if ( ! instance.change().isEmpty()) { instance.change().platform().ifPresent(version -> root.setString("platform", version.toString())); instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString())); root.setBool("pinned", instance.change().isPlatformPinned()); root.setBool("platform-pinned", instance.change().isPlatformPinned()); root.setBool("application-pinned", instance.change().isRevisionPinned()); } return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, 
HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); } private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { 
getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), new Email(user.email(), true))); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId 
applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { String versionString = readToString(request.getData()); ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Version version = Version.fromString(versionString); VersionStatus versionStatus = controller.readVersionStatus(); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus); if ( ! versionStatus.isActive(version) && ! isOperator(request)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + versionStatus.versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPlatformPin(); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. 
*/ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Inspector buildField = toSlime(request.getData()).get().field("build"); long build = buildField.valid() ? buildField.asLong() : -1; StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ? application.get().revisions().last().get().id() : getRevision(application.get(), build); Change change = Change.of(revision); if (pin) change = change.withRevisionPin(); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } private RevisionId getRevision(Application application, long build) { return application.revisions().withPackage().stream() .map(ApplicationVersion::id) .filter(version -> version.number() == build) .findFirst() .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build)) .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); } private HttpResponse cancelBuild(String tenantName, String applicationName, String build){ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); RevisionId revision = RevisionId.forProduction(Long.parseLong(build)); controller.applications().lockApplicationOrThrow(id, application -> { controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))); for (Instance instance : application.get().instances().values()) if (instance.change().revision().equals(Optional.of(revision))) 
controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! 
type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName()); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone. */ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = 
readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); } void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); status.cause().ifPresent(cause -> statusObject.setString("cause", cause)); } private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; } /** Enables reindexing of an application in a zone. */ private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); } /** Disables reindexing of an application in a zone. 
*/ private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().disableReindexing(id, zone); return new MessageResponse("Disabled reindexing of " + id + " in " + zone); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); RestartFilter restartFilter = new RestartFilter() .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of)) .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from)) .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); controller.applications().restart(deploymentId, restartFilter); return new MessageResponse("Requested restart of " + deploymentId); } /** Set suspension status of the given deployment. */ private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().setSuspension(deploymentId, suspend); return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId); } private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { if ( ! type.environment().isManuallyDeployed() && ! 
isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request)); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if ( ! vespaVersion.isEmpty()) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } DeploymentResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber()); Cursor logArray = root.setArray("prepareMessages"); for (LogEntry logMessage : result.log()) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.epochMillis()); logObject.setString("level", logMessage.level().getName()); logObject.setString("message", logMessage.message()); } return new SlimeJsonResponse(slime); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && ! 
isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! 
run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! 
type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime 
dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final 
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
```suggestion if (((numInitial + numDropped) > 0 && numNoReport > 0) || ```
/**
 * Returns the status of an ongoing or completed "drop documents" operation for the content/combined
 * clusters of a manually deployed application instance.
 *
 * Each content node may carry a "dropDocuments" report that advances through the states
 * initial -> dropped -> readied -> started (presence of "droppedAt"/"readiedAt"/"startedAt" fields).
 * Nodes with no report at all are counted separately ("numNoReport").
 *
 * @param clusterId optional cluster to restrict the status query to
 * @return JSON with either "lastDropped" (operation finished) or a "progress" object,
 *         or a 409 conflict when the per-node reports are in mutually inconsistent states
 * @throws IllegalArgumentException if the zone is not a manually deployed environment
 * @throws NotExistsException if no matching content nodes are found
 */
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment,
                                         String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId zone = ZoneId.from(environment, region);
    if (!zone.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");

    ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
    NodeFilter filters = NodeFilter.all()
                                   .states(Node.State.active)
                                   .applications(applicationId)
                                   .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository()
                                 .list(zone, clusterId.map(filters::clusterIds).orElse(filters));
    if (nodes.isEmpty()) {
        throw new NotExistsException("No content nodes found for %s%s in %s".formatted(
                applicationId.toFullString(), clusterId.map(id -> " cluster " + id).orElse(""), zone));
    }

    // Tally how far each node has progressed. "startedAt" implies the node has completed the
    // drop and restarted; its report also carries the "readiedAt" timestamp we surface below.
    Instant readiedAt = null;
    int numNoReport = 0, numInitial = 0, numDropped = 0, numReadied = 0, numStarted = 0;
    for (Node node : nodes) {
        Inspector report = Optional.ofNullable(node.reports().get("dropDocuments"))
                                   .map(json -> SlimeUtils.jsonToSlime(json).get())
                                   .orElse(null);
        if (report == null) numNoReport++;
        else if (report.field("startedAt").valid()) {
            numStarted++;
            readiedAt = SlimeUtils.instant(report.field("readiedAt"));
        }
        else if (report.field("readiedAt").valid()) numReadied++;
        else if (report.field("droppedAt").valid()) numDropped++;
        else numInitial++;
    }

    // Consistency check: in-flight states must not be mixed with nodes that never got the report.
    // Nodes in "initial" OR "dropped" coexisting with report-less nodes indicates a partially
    // applied operation (previously only "initial" was checked, missing the dropped/no-report mix).
    if (((numInitial + numDropped) > 0 && numNoReport > 0) ||
        (numReadied > 0 && (numNoReport > 0 || numInitial > 0 || numDropped > 0)) ||
        (numStarted > 0 && (numInitial > 0 || numDropped > 0)))
        return ErrorResponse.conflict("Inconsistent state, try restarting drop documents again");

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (numStarted + numNoReport == nodes.size()) {
        // Operation complete on all nodes (report-less nodes count as done, e.g. report cleaned up).
        if (readiedAt != null) root.setLong("lastDropped", readiedAt.toEpochMilli());
    } else {
        Cursor progress = root.setObject("progress");
        progress.setLong("total", nodes.size());
        progress.setLong("dropped", numDropped);
        progress.setLong("started", numStarted + numNoReport);
    }
    return new SlimeJsonResponse(slime);
}
if ((numInitial > 0 && numNoReport > 0) ||
/**
 * Reports the progress of a "drop documents" operation for the content/combined clusters of a
 * manually deployed application instance.
 *
 * Nodes advance through states recorded in their "dropDocuments" report: initial (report present,
 * no timestamps) -> dropped ("droppedAt") -> readied ("readiedAt") -> started ("startedAt").
 * Nodes without any report are tracked separately.
 *
 * @param clusterId optional cluster to restrict the query to
 * @return JSON containing "lastDropped" when the operation has finished on every node,
 *         otherwise a "progress" object; 409 conflict when not-yet-cleared and already-cleared
 *         nodes coexist, which suggests a concurrent topology change broke the last run
 * @throws IllegalArgumentException if the zone is not a manually deployed environment
 * @throws NotExistsException if no matching content nodes exist
 */
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment,
                                         String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId zoneId = ZoneId.from(environment, region);
    if (!zoneId.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");

    ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
    NodeFilter nodeFilter = NodeFilter.all()
                                      .states(Node.State.active)
                                      .applications(applicationId)
                                      .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined);
    List<Node> contentNodes = controller.serviceRegistry().configServer().nodeRepository()
                                        .list(zoneId, clusterId.map(nodeFilter::clusterIds).orElse(nodeFilter));
    if (contentNodes.isEmpty()) {
        throw new NotExistsException("No content nodes found for %s%s in %s".formatted(
                applicationId.toFullString(), clusterId.map(id -> " cluster " + id).orElse(""), zoneId));
    }

    // Count nodes per state. A "startedAt" report also carries the "readiedAt" instant,
    // which we surface as "lastDropped" once the operation has finished everywhere.
    Instant lastReadiedAt = null;
    int withoutReport = 0, inInitial = 0, inDropped = 0, inReadied = 0, inStarted = 0;
    for (Node node : contentNodes) {
        String reportJson = node.reports().get("dropDocuments");
        if (reportJson == null) {
            withoutReport++;
            continue;
        }
        Inspector report = SlimeUtils.jsonToSlime(reportJson).get();
        if (report.field("startedAt").valid()) {
            inStarted++;
            lastReadiedAt = SlimeUtils.instant(report.field("readiedAt"));
        }
        else if (report.field("readiedAt").valid()) inReadied++;
        else if (report.field("droppedAt").valid()) inDropped++;
        else inInitial++;
    }

    // Mixing nodes that have not yet cleared documents (initial/dropped) with nodes past that
    // point (no report / readied / started) means the previous run may have missed documents.
    if (inInitial + inDropped > 0 && withoutReport + inReadied + inStarted > 0)
        return ErrorResponse.conflict("Last dropping of documents may have failed to clear all documents due " +
                                      "to concurrent topology changes, consider retrying");

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (inStarted + withoutReport == contentNodes.size()) {
        // Done on all nodes (report-less nodes count as done, e.g. report already cleaned up).
        if (lastReadiedAt != null) root.setLong("lastDropped", lastReadiedAt.toEpochMilli());
    }
    else {
        Cursor progress = root.setObject("progress");
        progress.setLong("total", contentNodes.size());
        progress.setLong("dropped", inDropped);
        progress.setLong("started", inStarted + withoutReport);
    }
    return new SlimeJsonResponse(slime);
}
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e); default -> new 
ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), 
request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), 
Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), 
path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return 
ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return 
addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return 
suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), 
path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), 
path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), 
path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); List<Application> applications = 
controller.applications().asList(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ? recursiveRoot(request) : new ResourceResponse(request, "tenant"); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request)) .map(tenant -> tenant(tenant, request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request); return new SlimeJsonResponse(slime); } private HttpResponse accessRequests(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var accessControlService = controller.serviceRegistry().accessControlService(); var slime = new Slime(); var cursor = slime.setObject(); try { var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant); var managedAccess = accessControlService.getManagedAccess(tenant); cursor.setBool("managedAccess", managedAccess); accessRoleInformation.getPendingRequest() .ifPresent(membershipRequest -> { var requestCursor = 
cursor.setObject("pendingRequest"); requestCursor.setString("requestTime", membershipRequest.getCreationTime()); requestCursor.setString("reason", membershipRequest.getReason()); }); var auditLogCursor = cursor.setArray("auditLog"); accessRoleInformation.getAuditLog() .forEach(auditLogEntry -> { var entryCursor = auditLogCursor.addObject(); entryCursor.setString("created", auditLogEntry.getCreationTime()); entryCursor.setString("approver", auditLogEntry.getApprover()); entryCursor.setString("reason", auditLogEntry.getReason()); entryCursor.setString("status", auditLogEntry.getAction()); }); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) cursor.setBool("managedAccess", false); } return new SlimeJsonResponse(slime); } private HttpResponse requestSshAccess(String tenantName, HttpRequest request) { if (!isOperator(request)) { return ErrorResponse.forbidden("Only operators are allowed to request ssh access"); } if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only request access for cloud tenants"); controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName)); return new MessageResponse("OK"); } private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var inspector = toSlime(request.getData()).get(); var expiry = inspector.field("expiry").valid() ? 
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); try { controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes"); throw e; } } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support 
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    // Store the merged billing info under the tenant lock.
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}

/** Returns the contacts list of a cloud tenant's info as JSON. */
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(cloudTenant.info().contacts(), root);
    return new SlimeJsonResponse(slime);
}

/** Replaces the tenant's contact list with the merged result of the request body. */
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    var mergedInfo = cloudTenant.info()
            .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts()));
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}

/**
 * Validates merged tenant info: contact name and email must be non-blank, the email must
 * contain '@', and (continuing past this view) the website must be a parseable URL.
 *
 * @throws IllegalArgumentException if any check fails
 */
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    if (mergedInfo.contact().name().isBlank()) {
        throw new IllegalArgumentException("'contactName' cannot be empty");
    }
    if (mergedInfo.contact().email().getEmailAddress().isBlank()) {
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    }
    if (! mergedInfo.contact().email().getEmailAddress().contains("@")) {
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    }
    if (!
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield 
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true)) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); return oldContacts.ofType(TenantContacts.EmailContact.class) .stream() .filter(contact -> contact.email().getEmailAddress().equals(email)) .findAny() .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email())) .orElseGet(() -> { controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS); return new TenantContacts.EmailContact(audiences, new Email(email, false)); }); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, 
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) :
        controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                  .map(List::of)
                  .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
Slime slime = new Slime();
Cursor applicationArray = slime.setArray();
for (Application application : applications) {
    Cursor applicationObject = applicationArray.addObject();
    applicationObject.setString("tenant", application.id().tenant().value());
    applicationObject.setString("application", application.id().application().value());
    applicationObject.setString("url", withPath("/application/v4" +
                                                "/tenant/" + application.id().tenant().value() +
                                                "/application/" + application.id().application().value(),
                                                request.getUri()).toString());
    Cursor instanceArray = applicationObject.setArray("instances");
    for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                      : application.instances().keySet()) {
        Cursor instanceObject = instanceArray.addObject();
        instanceObject.setString("instance", instance.value());
        instanceObject.setString("url", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/instance/" + instance.value(),
                                                 request.getUri()).toString());
    }
}
return new SlimeJsonResponse(slime);
}

/**
 * Streams the dev application package of the last run of the given job as a zip download.
 *
 * @throws NotExistsException if the job has never run for this application
 */
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    // Fail with a proper 'not found' instead of an unchecked Optional.get()'s NoSuchElementException.
    RevisionId revision = controller.jobController().last(id, type)
                                    .orElseThrow(() -> new NotExistsException("No run of " + type.jobName() + " found for " + id))
                                    .versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "."
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var 
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private void toSlime(Cursor keysArray, Map<PublicKey, ? 
extends Principal> keys) { keys.forEach((key, principal) -> { Cursor keyObject = keysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", principal.getName()); }); } private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withoutDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var awsId = mandatory("awsId", data).asString(); var externalId = mandatory("externalId", data).asString(); var role = mandatory("role", 
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); } private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); } private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + 
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); } private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); } private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? 
"empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); } private Application getApplication(String tenantName, String applicationName) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); return controller.applications().getApplication(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private Instance getInstance(String tenantName, String applicationName, String instanceName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); return controller.applications().getInstance(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id)); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString()); 
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor)); toSlime(node.resources(), nodeObject); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); nodeObject.setBool("down", node.down()); nodeObject.setBool("retired", node.retired() || node.wantToRetire()); nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration()); nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration()); nodeObject.setString("group", node.group()); nodeObject.setLong("index", node.index()); } return new SlimeJsonResponse(slime); } private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id); Slime slime = new Slime(); Cursor clustersObject = slime.setObject().setObject("clusters"); for (Cluster cluster : application.clusters().values()) { Cursor clusterObject = clustersObject.setObject(cluster.id().value()); clusterObject.setString("type", cluster.type().name()); toSlime(cluster.min(), clusterObject.setObject("min")); toSlime(cluster.max(), clusterObject.setObject("max")); if ( ! 
cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            toSlime(cluster.target(), clusterObject.setObject("target"));
            toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        }
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns the API wire string for the given node state.
     *
     * @throws IllegalArgumentException for any state not explicitly listed here
     */
    private static String valueOf(Node.State state) {
        return switch (state) {
            case failed -> "failed";
            case parked -> "parked";
            case dirty -> "dirty";
            case ready -> "ready";
            case active -> "active";
            case inactive -> "inactive";
            case reserved -> "reserved";
            case provisioned -> "provisioned";
            case breakfixed -> "breakfixed";
            case deprovisioned -> "deprovisioned";
            default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        };
    }

    /** Returns the API wire string for the given orchestration state; anything unrecognized maps to "unknown". */
    static String valueOf(Node.ServiceState state) {
        return switch (state) {
            case expectedUp -> "expectedUp";
            case allowedDown -> "allowedDown";
            case permanentlyDown -> "permanentlyDown";
            case unorchestrated -> "unorchestrated";
            default -> "unknown";
        };
    }

    /**
     * Returns the API wire string for the given cluster type.
     *
     * @throws IllegalArgumentException if the cluster type is unknown
     */
    private static String valueOf(Node.ClusterType type) {
        return switch (type) {
            case admin -> "admin";
            case content -> "content";
            case container -> "container";
            case combined -> "combined";
            case unknown -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
        };
    }

    /** Returns the API wire string for the given disk speed. */
    private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
        return switch (diskSpeed) {
            case fast -> "fast";
            case slow -> "slow";
            case any -> "any";
        };
    }

    /** Returns the API wire string for the given storage type. */
    private static String valueOf(NodeResources.StorageType storageType) {
        return switch (storageType) {
            case remote -> "remote";
            case local -> "local";
            case any -> "any";
        };
    }

    private HttpResponse logs(String tenantName, String
applicationName, String instanceName, String environment, String region,
                              Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        // Stream logs from the config server straight through to the client; the upstream stream
        // is closed by the try-with-resources when rendering finishes (or fails).
        InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                try (logStream) {
                    logStream.transferTo(outputStream);
                }
            }
            @Override
            public long maxPendingBytes() {
                return 1 << 26; // permit up to 64 MiB of buffered, unsent response data
            }
        };
    }

    /** Returns the current support access state for the given deployment. */
    private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName,
                                       String environment, String region, Map<String, String> queryParameters) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                   requireZone(environment, region));
        SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
    }

    /**
     * Grants support access to the given deployment on behalf of the requesting user,
     * valid for 7 days from the controller clock's current instant.
     */
    private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName,
                                            String environment, String region, HttpRequest request) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                   requireZone(environment, region));
        Principal principal = requireUserPrincipal(request);
        Instant now = controller.clock().instant();
        SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
    }

    /** Revokes support access for the given deployment on behalf of the requesting user. */
    private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName,
                                               String environment, String region, HttpRequest request) {
        DeploymentId deployment = new
DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName()); controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant())); } private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment); return buildResponseFromSearchNodeMetrics(searchNodeMetrics); } private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { var from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.EPOCH); var until = Optional.ofNullable(request.getProperty("until")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.now(controller.clock())); var application = ApplicationId.from(tenantName, applicationName, instanceName); var zone = requireZone(environment, region); var deployment = new DeploymentId(application, zone); var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment); var slime = new Slime(); var root = slime.setObject(); for (var entry : events.entrySet()) { var serviceRoot = root.setArray(entry.getKey().clusterId().value()); 
scalingEventsToSlime(entry.getValue(), serviceRoot); } return new SlimeJsonResponse(slime); } private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) { try { var jsonObject = jsonMapper.createObjectNode(); var jsonArray = jsonMapper.createArrayNode(); for (SearchNodeMetrics metrics : searchnodeMetrics) { jsonArray.add(metrics.toJson()); } jsonObject.set("metrics", jsonArray); return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject)); } catch (JsonProcessingException e) { log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e); return new JsonResponse(500, ""); } } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); boolean upgradeRevision = ! requestObject.field("skipRevision").asBool(); boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool(); String triggered = reTrigger ? controller.applications().deploymentTrigger() .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform) .stream().map(job -> job.type().jobName()).collect(joining(", ")); String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") + (upgradeRevision ? "" : "revision") + ( ! upgradeRevision && ! upgradePlatform ? " and " : "") + (upgradePlatform ? "" : "platform") + ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : ""); return new MessageResponse(triggered.isEmpty() ? 
"Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); } private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) { var mail = mandatory("mail", inspector).asString(); var type = mandatory("mailType", inspector).asString(); var mailType = switch (type) { case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT; case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS; default -> throw new IllegalArgumentException("Unknown mail type " + type); }; var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType); return pendingVerification.isPresent() ? 
new MessageResponse("Re-sent verification mail to " + mail) : ErrorResponse.notFoundError("No pending mail verification found for " + mail); } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( ! 
status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && ! 
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } } private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); } private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! 
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && ! 
instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> ! 
instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region)); 
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); } private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); addAvailabilityZone(response, deployment.zone()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints = 
zoneEndpoints.not().legacy().direct(); } for (var endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> { Cursor enclave = response.setObject("enclave"); enclave.setString("cloudAccount", cloudAccount.value()); controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> 
enclave.setString("athensDomain", domain.value())); }); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant())) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ? 
// (continuation of toSlime(deployment, ...), whose start is above this chunk)
// Archive URI: enclave deployments are keyed by cloud account, all others by tenant.
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) :
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
        .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

// Last read/write activity for the deployment; fields are only set when known.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

// Current metrics snapshot for the deployment.
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}

/** Writes the BCP status object for the given rotation state. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}

/** Writes the endpoint status of each assigned rotation for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var array = object.setArray("endpointStatus");
    for (var rotation : rotations) {
        var statusObject = array.addObject();
        var targets = status.of(rotation.rotationId());
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}

/** Returns the monitoring dashboard URI for the given deployment. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}

/** Sets the global routing (rotation) status of a deployment in or out of service. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName,
                                               String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    // Operators and tenants are recorded as distinct agents, for auditing.
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(deploymentId).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ?
"in" : "out of")); } private String serviceTypeIn(DeploymentId id) { CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName(); if (CloudName.AWS.equals(cloud)) return "aws-private-link"; if (CloudName.GCP.equals(cloud)) return "gcp-service-connect"; return "unknown"; } private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId()); Slime slime = new Slime(); Cursor lbArray = slime.setObject().setArray("privateServices"); for (LoadBalancer lb : lbs) { Cursor serviceObject = lbArray.addObject(); serviceObject.setString("cluster", lb.cluster().value()); lb.service().ifPresent(service -> { serviceObject.setString("serviceId", service.id()); serviceObject.setString("type", serviceTypeIn(id)); Cursor urnsArray = serviceObject.setArray("allowedUrns"); for (AllowedUrn urn : service.allowedUrns()) { Cursor urnObject = urnsArray.addObject(); urnObject.setString("type", switch (urn.type()) { case awsPrivateLink -> "aws-private-link"; case gcpServiceConnect -> "gcp-service-connect"; }); urnObject.setString("urn", urn.urn()); } Cursor endpointsArray = serviceObject.setArray("endpoints"); controller.serviceRegistry().vpcEndpointService() .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount()) .forEach(endpoint -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("endpointId", endpoint.endpointId()); endpointObject.setString("state", endpoint.stateValue().name()); endpointObject.setString("detail", endpoint.stateString()); }); }); } return new SlimeJsonResponse(slime); } private HttpResponse dropDocuments(String tenant, String application, String instance, String 
environment, String region, Optional<ClusterSpec.Id> clusterId) { ZoneId zone = ZoneId.from(environment, region); if (!zone.environment().isManuallyDeployed()) throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments"); ApplicationId applicationId = ApplicationId.from(tenant, application, instance); controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId); return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() + clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = 
// (continuation of rotationStatus)
controller.applications().requireInstance(applicationId);
ZoneId zone = requireZone(environment, region);
RotationId rotation = findRotationId(instance, endpointId);
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) {
    throw new NotExistsException(instance + " has no deployment in " + zone);
}
Slime slime = new Slime();
Cursor response = slime.setObject();
toSlime(instance.rotationStatus().of(rotation, deployment), response);
return new SlimeJsonResponse(slime);
}

/** Returns the change (platform/application version) currently rolling out to an instance. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! instance.change().isEmpty()) {
        instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
        // "pinned" mirrors "platform-pinned" — presumably kept for older clients; verify before removing.
        root.setBool("pinned", instance.change().isPlatformPinned());
        root.setBool("platform-pinned", instance.change().isPlatformPinned());
        root.setBool("application-pinned", instance.change().isRevisionPinned());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns whether orchestration of the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}

/** Proxies a service's /status page from the config server for a node of the deployment. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host),
                                                                          HttpURL.Path.parse("/status").append(restPath),
                                                                          Query.empty().add(request.getJDiscRequest().parameters()));
}

/** Returns orchestrator state for the service nodes of the deployment. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
}

/** Proxies a service's /state/v1 API from the config server, forwarding the original request URL. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}

/** Returns application package content at the given path for a deployment. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}

/** Updates tenant metadata from the request body. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
// (continuation of updateTenant)
getTenantOrThrow(tenantName); // 404 when the tenant does not exist
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                            accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}

/** Creates a tenant; in public systems, seeds its contact info with the creating user. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), new Email(user.email(), true)));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(info);
            controller.tenants().store(lockedTenant);
        });
    }
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}

/** Creates an application under the given tenant. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    Application application = controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Creates an instance, first creating the application when it does not yet exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    controller.applications().createInstance(applicationId.instance(instanceName));
    Slime slime = new Slime();
    toSlime(applicationId.instance(instanceName), slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // The empty version means "deploy the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Only operators may force a version which is not active in this system.
        if ( ! versionStatus.isActive(version) && ! isOperator(request))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPlatformPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Trigger deployment to the last known application package for the given application.
*/
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1; // -1: use the latest known package
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                          : getRevision(application.get(), build);
        Change change = Change.of(revision);
        if (pin)
            change = change.withRevisionPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Returns the revision with the given build number; the build must also exist in the application store. */
private RevisionId getRevision(Application application, long build) {
    return application.revisions().withPackage().stream()
                      .map(ApplicationVersion::id)
                      .filter(version -> version.number() == build)
                      .findFirst()
                      .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                              application.id().application(),
                                                                                              build))
                      .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}

/** Marks the given build as non-deployable, cancelling any instance change targeting it. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        for (Instance instance : application.get().instances().values())
            if (instance.change().revision().equals(Optional.of(revision)))
                controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}

/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Map e.g. "versions" / "all" / "platform-pin" to the ChangesToCancel enum.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '")
                .append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Comma-separated filters; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .toList();
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> !
// (continuation of reindex: remaining filters and dispatch)
type.isBlank())
.toList();
Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
boolean indexedOnly = request.getBooleanProperty("indexedOnly");
controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed,
                                  "reindexing triggered by " + requireUserPrincipal(request).getName());
return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                           (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                           (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                           (indexedOnly ? ", for indexed types" : "") +
                           (speed != null ? ", with speed " + speed : ""));
}

/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters, and document types within each cluster, are emitted sorted by key for stable output.
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}

/** Writes the optional fields of a reindexing status to the given cursor. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
    status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
    status.cause().ifPresent(cause -> statusObject.setString("cause", cause));
}

/** Maps a reindexing state to its wire name. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING: yield "pending";
        case RUNNING: yield "running";
        case FAILED: yield "failed";
        case SUCCESSFUL: yield "successful";
    };
}

/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}

/** Disables reindexing of an application in a zone.
*/
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Optional query parameters narrow the restart to a host, cluster type and/or cluster id.
    RestartFilter restartFilter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, restartFilter);
    return new MessageResponse("Requested restart of " + deploymentId);
}

/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
}

/** Deploys an application package directly to the given job's zone; manual environments only, unless operator. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                    Optional.of(id.instance()),
                                                                    Optional.of(type.zone()),
                                                                    applicationPackage,
                                                                    Optional.of(requireUserPrincipal(request)));

    // The optional "deployOptions" JSON part may carry a Vespa version and a dry-run flag.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);

    boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions"))
                             .map(json -> SlimeUtils.jsonToSlime(json).get())
                             .flatMap(options -> optional("dryRun", options))
                             .map(Boolean::valueOf)
                             .orElse(false);

    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}

/** Deploys a system application package to the given zone at the current system version. */
private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( !
// (continuation of deploySystemApplication: validation and dispatch)
dataParts.containsKey("deployOptions"))
    return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

// Only system applications which carry an application package may be deployed through this API.
Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) {
    return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
}

// The version is always the current system version; clients may not choose one.
String vespaVersion = deployOptions.field("vespaVersion").asString();
if ( ! vespaVersion.isEmpty()) {
    return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
}

VersionStatus versionStatus = controller.readVersionStatus();
if (versionStatus.isUpgrading()) {
    throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
if (systemVersion.isEmpty()) {
    throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
DeploymentResult result = controller.applications()
                                    .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber());
Cursor logArray = root.setArray("prepareMessages");
for (LogEntry logMessage : result.log()) {
    Cursor logObject = logArray.addObject();
    logObject.setLong("time", logMessage.epochMillis());
    logObject.setString("level", logMessage.level().getName());
    logObject.setString("message", logMessage.message());
}
return new SlimeJsonResponse(slime);
}

/** Deletes a tenant; "forget" (hard delete) is restricted to operators. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && ! isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    controller.tenants().delete(TenantName.from(tenantName),
                                Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
                                                                              toSlime(request.getData()).get(),
                                                                              request.getJDiscRequest())),
                                forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}

/** Deletes an application. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}

/** Deletes an instance, and the application as well when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}

/** Deactivates a deployment, aborting any deployment job still running for it. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> !
// (continuation of deactivate: abort any still-running deployment job)
run.hasEnded())
.ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
return new MessageResponse("Deactivated " + id);
}

/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance's production deployments when the given instance is not in the spec.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    // Non-production jobs also include the deployment of the job's own zone.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(toTest, type.zone()));
    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null)
        throw new NotExistsException(toTest + " is not deployed in " + type.zone());
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false,
                                                                  deployment.version(), deployment.revision(), deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}

/** Requests a service dump for a node, rejecting a dump already in progress unless "force" is set. */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);

    // A dump which has neither completed nor failed is considered in progress.
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }

    // Validate the request payload: configId and a non-empty artifacts list are required.
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }
    Slime
dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final 
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
/**
 * REST API handler for the /application/v4 resource tree: tenants, applications, instances,
 * deployments, jobs and related sub-resources.
 *
 * Requests are dispatched on HTTP method, then matched against path patterns in declaration
 * order — the order of the if-chains in the handle* methods is significant and must not be
 * rearranged. Domain and transport exceptions are translated to HTTP error responses in
 * {@link #auditAndHandle}.
 */
class ApplicationApiHandler extends AuditLoggingRequestHandler {

    private static final ObjectMapper jsonMapper = new ObjectMapper();

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    @Override
    public Duration getTimeout() {
        // Generous timeout — presumably some operations served here (e.g. deploys) are slow; confirm before lowering.
        return Duration.ofMinutes(20);
    }

    /** Dispatches on HTTP method, and maps known exception types to the matching HTTP error response. */
    @Override
    public HttpResponse auditAndHandle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri());
            return switch (request.getMethod()) {
                case GET -> handleGET(path, request);
                case PUT -> handlePUT(path, request);
                case POST -> handlePOST(path, request);
                case PATCH -> handlePATCH(path, request);
                case DELETE -> handleDELETE(path, request);
                case OPTIONS -> handleOPTIONS();
                default -> ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            };
        }
        catch (RestApiException.Forbidden e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (RestApiException.Unauthorized e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Config server error codes map onto distinct HTTP statuses; anything unknown becomes 400.
            return switch (e.code()) {
                case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
                default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
            };
        }
        catch (RuntimeException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    /** GET dispatch. Patterns are tried in order; first match wins. */
    private HttpResponse handleGET(Path path, HttpRequest request) {
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}"))
            return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))),
                                                             controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(),
                                                             Optional.ofNullable(request.getProperty("limit")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy path order (.../environment/.../region/.../instance/...) below; both spellings are supported.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PUT dispatch. Patterns are tried in order; first match wins. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** POST dispatch. Patterns are tried in order; first match wins. */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        // "pin"/"platform-pin" are aliases: both pin the platform version ("true" pin argument).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        // Legacy path order (.../environment/.../region/.../instance/...).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PATCH dispatch. Both the application- and instance-level paths patch the application. */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** DELETE dispatch. Patterns are tried in order; first match wins. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy path order (.../environment/.../region/.../instance/...).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the supported methods. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Lists every tenant with its applications, as a recursive variant of {@link #root}. */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications = controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            toSlime(tenantArray.addObject(),
                    tenant,
                    applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(),
                    request);
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
                ? recursiveRoot(request)
                : new ResourceResponse(request, "tenant");
    }

    /** Lists all (optionally also deleted) tenants. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the managed-access flag, pending access request and audit log for a cloud tenant. */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");

        var accessControlService = controller.serviceRegistry().accessControlService();
        var slime = new Slime();
        var cursor = slime.setObject();
        try {
            var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
            var managedAccess = accessControlService.getManagedAccess(tenant);
            cursor.setBool("managedAccess", managedAccess);
            accessRoleInformation.getPendingRequest()
                                 .ifPresent(membershipRequest -> {
                                     var requestCursor = cursor.setObject("pendingRequest");
                                     requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                                     requestCursor.setString("reason", membershipRequest.getReason());
                                 });
            var auditLogCursor = cursor.setArray("auditLog");
            accessRoleInformation.getAuditLog()
                                 .forEach(auditLogEntry -> {
                                     var entryCursor = auditLogCursor.addObject();
                                     entryCursor.setString("created", auditLogEntry.getCreationTime());
                                     entryCursor.setString("approver", auditLogEntry.getApprover());
                                     entryCursor.setString("reason", auditLogEntry.getReason());
                                     entryCursor.setString("status", auditLogEntry.getAction());
                                 });
        }
        catch (ZmsClientException e) {
            // NOTE(review): only 404 is handled (reported as unmanaged access); other ZMS errors
            // are silently swallowed here — confirm this best-effort behavior is intentional.
            if (e.getErrorCode() == 404)
                cursor.setBool("managedAccess", false);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Requests ssh access for a cloud tenant. Operator-only. */
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    /** Decides (approves or rejects) a pending access request for a cloud tenant. */
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); }
// Enables managed access for the tenant (delegates to setManagedAccess).
private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); }
// Disables managed access for the tenant (delegates to setManagedAccess).
private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); }
// Sets the managed-access flag for a cloud tenant via the access control service and echoes the new value as JSON.
// A ZMS 404 is translated to a conflict response ("configuration not yet ready"); other ZMS errors propagate.
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); try { controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes"); throw e; } }
// Returns the tenant-info JSON for a cloud tenant, or 404 for unknown/non-cloud tenants.
private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); }
// Applies the handler to the named tenant when it is a cloud tenant; otherwise returns a 404 response.
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); }
// Serializes a TenantInfo into the flat tenant-info JSON layout; empty info yields an empty object.
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); }
// Serializes the profile view of a cloud tenant's info: "contact" and "tenant" sub-objects plus the address.
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); }
// Variant of withCloudTenant that also parses the request body and hands it to the handler as an Inspector.
// NOTE(review): unlike the Function overload above, this does not filter on tenant type — confirm intended.
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); }
// Updates the profile section of tenant info; a changed contact email triggers a verification mail and is stored unverified.
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> {
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } }
// Writes a tenant address as an "address" object under parentCursor; an empty address is omitted entirely.
private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); }
// Writes the billing contact (name/email/phone) and its address as a "billingContact" object; omitted when empty.
private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); }
// Writes all tenant contacts as a "contacts" array, each with its audiences; only EMAIL contacts are
// implemented — any other contact type throws IllegalArgumentException.
private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); }
// Parses a wire-format audience string into a TenantContacts.Audience value (inverse of toAudience).
private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true)) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); }
// Merges the contact list from the request with the existing contacts: an email already present keeps its
// (possibly verified) Email object with updated audiences; a new email triggers a NOTIFICATIONS verification
// mail and is stored unverified. An invalid (absent) inspector leaves the old contacts untouched.
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); return oldContacts.ofType(TenantContacts.EmailContact.class) .stream() .filter(contact -> contact.email().getEmailAddress().equals(email)) .findAny() .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email())) .orElseGet(() -> { controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS); return new TenantContacts.EmailContact(audiences, new Email(email, false)); }); }).toList(); return new TenantContacts(contacts); }
// Renders notifications as a JSON array, for one tenant or all tenants with notifications; honors the
// production-only view and the "excludeMessages" request property, then filters by further request properties.
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request,
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); }
// Maps a Notification.Type to its external JSON label; submission and applicationPackage share the
// "applicationPackage" label.
private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; }
// Maps a Notification.Level to its lowercase external JSON label.
private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; }
// Lists a tenant's applications (or a single named application, throwing NotExistsException when missing)
// with per-instance URLs; the set of instances shown depends on the production-only view.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var 
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private void toSlime(Cursor keysArray, Map<PublicKey, ? 
extends Principal> keys) { keys.forEach((key, principal) -> { Cursor keyObject = keysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", principal.getName()); }); }
// Adds a deploy key (PEM public key in the request body) to the application under lock and returns the
// resulting key set as a "keys" array of PEM strings.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); }
// Removes a deploy key (PEM public key in the request body) from the application under lock and returns the
// remaining key set as a "keys" array of PEM strings.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withoutDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); }
// Registers a named secret store for a cloud tenant; reads awsId/externalId/role from the request body.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var awsId = mandatory("awsId", data).asString(); var externalId = mandatory("externalId", data).asString(); var role = mandatory("role",
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); } private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); } private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + 
tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}

/** Revokes the GCP archive access member for this tenant (cloud tenants only). */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}

/** Applies partial updates (major version pin, deploy key) to an application. */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A majorVersion of 0 clears the pinned major version
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }

        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }

        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}

/** Returns the application, or throws NotExistsException if it does not exist. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(applicationId)
            .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Returns the instance, or throws NotExistsException if it does not exist. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(applicationId)
            .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Lists the nodes allocated to a deployment, with their state and configuration details. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // Restart/reboot is pending when the wanted generation is ahead of the current one
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns autoscaling status for each cluster of a deployment. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        if ( !
cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize")); toSlime(cluster.current(), clusterObject.setObject("current")); toSlime(cluster.target(), clusterObject.setObject("target")); toSlime(cluster.suggested(), clusterObject.setObject("suggested")); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents")); clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis()); } return new SlimeJsonResponse(slime); } private static String valueOf(Node.State state) { return switch (state) { case failed: yield "failed"; case parked: yield "parked"; case dirty: yield "dirty"; case ready: yield "ready"; case active: yield "active"; case inactive: yield "inactive"; case reserved: yield "reserved"; case provisioned: yield "provisioned"; case breakfixed: yield "breakfixed"; case deprovisioned: yield "deprovisioned"; default: throw new IllegalArgumentException("Unexpected node state '" + state + "'."); }; } static String valueOf(Node.ServiceState state) { switch (state) { case expectedUp: return "expectedUp"; case allowedDown: return "allowedDown"; case permanentlyDown: return "permanentlyDown"; case unorchestrated: return "unorchestrated"; case unknown: break; } return "unknown"; } private static String valueOf(Node.ClusterType type) { return switch (type) { case admin: yield "admin"; case content: yield "content"; case container: yield "container"; case combined: yield "combined"; case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); }; } private static String valueOf(NodeResources.DiskSpeed diskSpeed) { return switch (diskSpeed) { case fast : yield "fast"; case slow : yield "slow"; case any : yield "any"; }; } private static String valueOf(NodeResources.StorageType storageType) { return switch (storageType) { case remote : yield "remote"; case local : yield "local"; case any : yield "any"; }; } private HttpResponse logs(String tenantName, String 
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    // Stream the config server's log data straight through to the client; allow up to 64 MiB pending
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }

        @Override
        public long maxPendingBytes() {
            return 1 << 26;
        }
    };
}

/** Returns the current support access state for a deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}

/** Grants support access to a deployment for 7 days, attributed to the requesting user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}

/** Revokes support access for a deployment and re-triggers it so access is actually removed. */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}

/** Returns search node (proton) metrics for a deployment. */
private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment);
    return buildResponseFromSearchNodeMetrics(searchNodeMetrics);
}

/** Returns scaling events per cluster within the requested [from, until] window (epoch seconds). */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    // Defaults: from = epoch, until = now
    var from = Optional.ofNullable(request.getProperty("from"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.EPOCH);
    var until = Optional.ofNullable(request.getProperty("until"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.now(controller.clock()));
    var application = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    var deployment = new DeploymentId(application, zone);
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    var slime = new Slime();
    var root = slime.setObject();
    for (var entry : events.entrySet()) {
        var serviceRoot = root.setArray(entry.getKey().clusterId().value());
        scalingEventsToSlime(entry.getValue(), serviceRoot);
    }
    return new SlimeJsonResponse(slime);
}

/** Wraps search node metrics in a {"metrics": [...]} JSON response; 500 on serialization failure. */
private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
    try {
        var jsonObject = jsonMapper.createObjectNode();
        var jsonArray = jsonMapper.createArrayNode();
        for (SearchNodeMetrics metrics : searchnodeMetrics) {
            jsonArray.add(metrics.toJson());
        }
        jsonObject.set("metrics", jsonArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}

/** (Re-)triggers a job for an instance, optionally skipping tests, revision and/or platform upgrade. */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // Describe which upgrades, if any, were suppressed in this triggering
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ?
                               "Job " + type.jobName() + " for " + id + " not triggered" :
                               "Triggered " + triggered + " for " + id + suppressedUpgrades);
}

/** Pauses a job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}

/** Resumes a paused job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}

/** Re-sends a pending verification mail of the given type ("contact" or "notifications"). */
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
    var mail = mandatory("mail", inspector).asString();
    var type = mandatory("mailType", inspector).asString();
    var mailType = switch (type) {
        case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT;
        case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown mail type " + type);
    };
    var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType);
    return pendingVerification.isPresent() ?
new MessageResponse("Re-sent verification mail to " + mail) :
           ErrorResponse.notFoundError("No pending mail verification found for " + mail);
}

/** Serializes an application overview: status, change, metrics, activity and all instances. */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/", request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // Deployment and outstanding-change status is reported for the first instance only
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

/** Serializes one instance of an application: change status, change blockers and deployments. */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());

        // Change blockers (block windows) declared in the deployment spec
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    addRotationId(object, instance);

    // Deployments in spec order when the instance is declared, otherwise in stored order
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
            .map(spec -> sortedDeployments(instance.deployments().values(), spec))
            .orElse(List.copyOf(instance.deployments().values()));

    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);

        if (recurseOverDeployments(request)) // Include full deployment information when recursive
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            addAvailabilityZone(deploymentObject, deployment.zone());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}

/** Adds the first assigned rotation id, if any. */
private void addRotationId(Cursor object, Instance instance) {
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}

/** Serializes a single instance as the response root, with deployments and application metadata. */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());

    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

        if ( !
instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

        // Change blockers (block windows) declared in the deployment spec
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);

    // Deployments in spec order when the instance is declared, otherwise in stored order
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
            .map(spec -> sortedDeployments(instance.deployments().values(), spec))
            .orElse(List.copyOf(instance.deployments().values()));

    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }

        if (recurseOverDeployments(request)) // Include full deployment information when recursive
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            addAvailabilityZone(deploymentObject, deployment.zone());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Also list zones targeted by production jobs or active manual deployment jobs which have no deployment yet
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

/** Returns detailed information about a single deployment. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
            .orElseThrow(() -> new NotExistsException(id + " not found"));

    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}

/** Serializes a change (platform version and/or application revision). */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision)));
}

/** Serializes a single endpoint. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}

/** Serializes full deployment details: endpoints, versions, job status, quota, archive URI and metrics. */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    addAvailabilityZone(response, deployment.zone());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    // Endpoints: zone-scoped first, then endpoints declared in deployment.xml targeting this deployment.
    // Legacy and non-direct endpoints are filtered out unless explicitly requested.
    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints = zoneEndpoints.not().legacy().direct();
    }
    for (var endpoint : zoneEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                               .targets(deploymentId);
    if (!legacyEndpoints) {
        declaredEndpoints = declaredEndpoints.not().legacy().direct();
    }
    for (var endpoint : declaredEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }

    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", application.revisions().get(deployment.revision()).stringId());
    response.setLong("build", deployment.revision().number());
    Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
    response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));

    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

    // Enclave (tenant cloud account) details, when this deployment runs in one
    controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
        Cursor enclave = response.setObject("enclave");
        enclave.setString("cloudAccount", cloudAccount.value());
        controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> enclave.setString("athensDomain", domain.value()));
    });

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

        if (!deployment.zone().environment().isManuallyDeployed()) {
            // Declared deployments: derive status from the job step for this zone
            DeploymentStatus status = controller.jobController().deploymentStatus(application);
            JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
            Optional.ofNullable(status.jobSteps().get(jobId))
                    .ifPresent(stepStatus -> {
                        JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                        if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                            response.setString("status", "complete");
                        else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant()))
                            response.setString("status", "pending");
                        else
                            response.setString("status", "running");
                    });
        } else {
            // Manual deployments: status follows the last deployment run
            var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
            deploymentRun.ifPresent(run -> {
                response.setString("status", run.hasEnded() ? "complete" : "running");
            });
        }
    }

    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));

    // Enclave deployments archive per cloud account; all others archive per tenant
    (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ?
     controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) :
     controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
            .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}

/** Serializes a rotation state as a BCP status object. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}

/** Serializes endpoint (rotation) status for each assigned rotation of a deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var array = object.setArray("endpointStatus");
    for (var rotation : rotations) {
        var statusObject = array.addObject();
        var targets = status.of(rotation.rotationId());
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}

/** Returns the monitoring system URI for a deployment. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}

/** Sets a deployment in or out of service in its global rotation. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(deploymentId).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ?
// Tail of setGlobalRotationOverride(...): completes the success message started just above.
        "in" : "out of")); }

    /**
     * Returns the private-endpoint service type name for the cloud hosting the given deployment.
     * NOTE(review): zones().all().get(id.zoneId()).get() assumes the zone is always known —
     * an unknown zone would throw from Optional.get(); confirm callers guarantee this.
     */
    private String serviceTypeIn(DeploymentId id) {
        CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName();
        if (CloudName.AWS.equals(cloud)) return "aws-private-link";
        if (CloudName.GCP.equals(cloud)) return "gcp-service-connect";
        return "unknown";
    }

    /** Lists private (VPC endpoint) services, allowed URNs and endpoint connections per load balancer of a deployment. */
    private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));
        List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId());
        Slime slime = new Slime();
        Cursor lbArray = slime.setObject().setArray("privateServices");
        for (LoadBalancer lb : lbs) {
            Cursor serviceObject = lbArray.addObject();
            serviceObject.setString("cluster", lb.cluster().value());
            // Only load balancers with an attached private service are described further.
            lb.service().ifPresent(service -> {
                serviceObject.setString("serviceId", service.id());
                serviceObject.setString("type", serviceTypeIn(id));
                Cursor urnsArray = serviceObject.setArray("allowedUrns");
                for (AllowedUrn urn : service.allowedUrns()) {
                    Cursor urnObject = urnsArray.addObject();
                    urnObject.setString("type", switch (urn.type()) {
                        case awsPrivateLink -> "aws-private-link";
                        case gcpServiceConnect -> "gcp-service-connect";
                    });
                    urnObject.setString("urn", urn.urn());
                }
                Cursor endpointsArray = serviceObject.setArray("endpoints");
                controller.serviceRegistry().vpcEndpointService()
                          .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount())
                          .forEach(endpoint -> {
                              Cursor endpointObject = endpointsArray.addObject();
                              endpointObject.setString("endpointId", endpoint.endpointId());
                              endpointObject.setString("state", endpoint.stateValue().name());
                              endpointObject.setString("detail", endpoint.stateString());
                          });
            });
        }
        return new SlimeJsonResponse(slime);
    }

    /**
     * Triggers dropping of documents for an application (optionally one cluster) in a zone.
     *
     * @throws IllegalArgumentException unless the zone is manually deployed (dev/perf)
     */
    private HttpResponse dropDocuments(String tenant, String application, String instance, String environment, String region, Optional<ClusterSpec.Id> clusterId) {
        ZoneId zone = ZoneId.from(environment, region);
        if (!zone.environment().isManuallyDeployed())
            throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");
        ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
        controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId);
        return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() +
                                   clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone);
    }

    /** Returns the global rotation override (routing status) of the primary rotation endpoint, if any. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Slime slime = new Slime();
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                       .requiresRotation()
                                                       .primary();
        if (primaryEndpoint.isPresent()) {
            DeploymentRoutingContext context = controller.routing().of(deploymentId);
            RoutingStatus status = context.routingStatus();
            array.addString(primaryEndpoint.get().upstreamName(deploymentId));
            Cursor statusObject = array.addObject();
            statusObject.setString("status", status.value().name());
            statusObject.setString("reason", "");
            statusObject.setString("agent", status.agent().name());
            statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
        }
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns the rotation status for a deployment, for the rotation resolved from the optional endpoint id.
     *
     * @throws NotExistsException if the instance has no deployment in the given zone
     */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().requireInstance(applicationId);
        ZoneId zone = requireZone(environment, region);
        RotationId rotation = findRotationId(instance, endpointId);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(instance.rotationStatus().of(rotation, deployment), response);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the in-progress change (platform/application version, pin flags) of an instance, empty object if none. */
    private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        if ( ! instance.change().isEmpty()) {
            instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
            instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
            // "pinned" is kept alongside "platform-pinned" — presumably for backwards
            // compatibility with older clients; verify before removing either.
            root.setBool("pinned", instance.change().isPlatformPinned());
            root.setBool("platform-pinned", instance.change().isPlatformPinned());
            root.setBool("application-pinned", instance.change().isRevisionPinned());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns whether orchestration of the given deployment is currently suspended. */
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        boolean suspended = controller.applications().isSuspended(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setBool("suspended", suspended);
        return new SlimeJsonResponse(slime);
    }

    /** Proxies a service status page ("/status/<rest>") for a service on a host in a deployment. */
    private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters()));
    }

    /** Returns the orchestrator's view of the service nodes of a deployment. */
    private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
    }

    /** Proxies a service's "/state/v1/<rest>" page, forwarding the original URL for link rewriting. */
    private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Query query = Query.empty().add(request.getJDiscRequest().parameters());
        query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
        return controller.serviceRegistry().configServer().getServiceNodePage(
                deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
    }

    /** Returns application package content at the given path for a deployment. */
    private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
    }

    /** Updates a tenant from the request body; requires the tenant to exist. Continues on the next chunk line. */
    private HttpResponse updateTenant(String tenantName, HttpRequest request) {
// Body of updateTenant(String, HttpRequest), whose signature ends the previous chunk line.
        getTenantOrThrow(tenantName); // 404s early if the tenant does not exist
        TenantName tenant = TenantName.from(tenantName);
        Inspector requestObject = toSlime(request.getData()).get();
        controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                    accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
        return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
    }

    /**
     * Creates a tenant from the request body. In public systems, the creating user is also
     * stored as the tenant's contact.
     */
    private HttpResponse createTenant(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Inspector requestObject = toSlime(request.getData()).get();
        controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                    accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
        if (controller.system().isPublic()) {
            User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
            TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                        .info()
                                        .withContact(TenantContact.from(user.name(), new Email(user.email(), true)));
            controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
                lockedTenant = lockedTenant.withInfo(info);
                controller.tenants().store(lockedTenant);
            });
        }
        return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
    }

    /** Creates an application under a tenant and returns its serialized form. */
    private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
        // Local is unused beyond the side effect of creating the application.
        Application application = controller.applications().createApplication(id, credentials);
        Slime slime = new Slime();
        toSlime(id, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Creates an instance, first creating the application itself if it does not yet exist. */
    private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        if (controller.applications().getApplication(applicationId).isEmpty())
            createApplication(tenantName, applicationName, request);
        controller.applications().createInstance(applicationId.instance(instanceName));
        Slime slime = new Slime();
        toSlime(applicationId.instance(instanceName), slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // Empty version means "current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            // Only operators may force deployment of versions not active in this system.
            if ( ! versionStatus.isActive(version) && ! isOperator(request))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPlatformPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Trigger deployment to the last known application package for the given application. */
    private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Inspector buildField = toSlime(request.getData()).get().field("build");
        long build = buildField.valid() ? buildField.asLong() : -1; // -1 sentinel: use the latest revision
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                              : getRevision(application.get(), build);
            Change change = Change.of(revision);
            if (pin)
                change = change.withRevisionPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /**
     * Resolves the revision with the given build number, requiring that its package is still stored.
     *
     * @throws IllegalArgumentException if no such (stored) build exists
     */
    private RevisionId getRevision(Application application, long build) {
        return application.revisions().withPackage().stream()
                          .map(ApplicationVersion::id)
                          .filter(version -> version.number() == build)
                          .findFirst()
                          .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                                   application.id().application(),
                                                                                                   build))
                          .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
    }

    /** Marks a production build as skipped (non-deployable), and cancels any instance change targeting it. */
    private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
        controller.applications().lockApplicationOrThrow(id, application -> {
            controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
            for (Instance instance : application.get().instances().values())
                if (instance.change().revision().equals(Optional.of(revision)))
                    controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        });
        return new MessageResponse("Marked build '" + build + "' as non-deployable");
    }

    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
    private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Change change = application.get().require(id.instance()).change();
            if (change.isEmpty()) {
                response.append("No deployment in progress for ").append(id).append(" at this time");
                return;
            }
            // Request value like "platform-version" maps onto the PLATFORM_VERSION enum constant.
            ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase());
            controller.applications().deploymentTrigger().cancelChange(id, cancel);
            response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
    private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        // "clusterId" and "documentType" are comma-separated lists; blanks are ignored.
        List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                            .flatMap(clusters -> Stream.of(clusters.split(",")))
                                            .filter(cluster -> ! cluster.isBlank())
                                            .toList();
        List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                             .flatMap(types -> Stream.of(types.split(",")))
                                             .filter(type -> !
// Tail of reindex(...): completes the documentType filter begun on the previous chunk line.
                                             type.isBlank())
                                             .toList();
        Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
        boolean indexedOnly = request.getBooleanProperty("indexedOnly");
        controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed,
                                          "reindexing triggered by " + requireUserPrincipal(request).getName());
        return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                                   (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                                   (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                                   (indexedOnly ? ", for indexed types" : "") +
                                   (speed != null ? ", with speed " + speed : ""));
    }

    /** Gets reindexing status of an application in a zone. */
    private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setBool("enabled", reindexing.enabled());
        Cursor clustersArray = root.setArray("clusters");
        // Clusters, and entries within them, are emitted in sorted key order for stable output.
        reindexing.clusters().entrySet().stream().sorted(comparingByKey())
                  .forEach(cluster -> {
                      Cursor clusterObject = clustersArray.addObject();
                      clusterObject.setString("name", cluster.getKey());
                      Cursor pendingArray = clusterObject.setArray("pending");
                      cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                             .forEach(pending -> {
                                 Cursor pendingObject = pendingArray.addObject();
                                 pendingObject.setString("type", pending.getKey());
                                 pendingObject.setLong("requiredGeneration", pending.getValue());
                             });
                      Cursor readyArray = clusterObject.setArray("ready");
                      cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                             .forEach(ready -> {
                                 Cursor readyObject = readyArray.addObject();
                                 readyObject.setString("type", ready.getKey());
                                 setStatus(readyObject, ready.getValue());
                             });
                  });
        return new SlimeJsonResponse(slime);
    }

    /** Serializes the optional fields of one reindexing status into {@code statusObject}. */
    void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
        status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
        status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
        status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
        status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
        status.message().ifPresent(message -> statusObject.setString("message", message));
        status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
        status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
        status.cause().ifPresent(cause -> statusObject.setString("cause", cause));
    }

    /** Maps a reindexing state to its lower-case wire name. Exhaustive switch — no default needed. */
    private static String toString(ApplicationReindexing.State state) {
        return switch (state) {
            case PENDING: yield "pending";
            case RUNNING: yield "running";
            case FAILED: yield "failed";
            case SUCCESSFUL: yield "successful";
        };
    }

    /** Enables reindexing of an application in a zone. */
    private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().enableReindexing(id, zone);
        return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
    }

    /** Disables reindexing of an application in a zone. */
    private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().disableReindexing(id, zone);
        return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        // Any combination of hostname / clusterType / clusterId filters may be given.
        RestartFilter restartFilter = new RestartFilter()
                .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
                .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
                .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        controller.applications().restart(deploymentId, restartFilter);
        return new MessageResponse("Requested restart of " + deploymentId);
    }

    /** Set suspension status of the given deployment. */
    private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        controller.applications().setSuspension(deploymentId, suspend);
        return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
    }

    /**
     * Deploys an uploaded application package directly through the job controller.
     * Non-manually-deployed environments are operator-only.
     */
    private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
        if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
            throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

        Map<String, byte[]> dataParts = parseDataParts(request);
        if ( ! dataParts.containsKey("applicationZip"))
            throw new IllegalArgumentException("Missing required form part 'applicationZip'");
        // NOTE(review): checks "applicationZip" but reads EnvironmentResource.APPLICATION_ZIP —
        // presumably the same key; confirm against EnvironmentResource.
        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
        controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                         Optional.of(id.instance()),
                                                                         Optional.of(type.zone()),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));

        // Optional "deployOptions" JSON part may carry "vespaVersion" and "dryRun".
        Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                            .map(json -> SlimeUtils.jsonToSlime(json).get())
                                            .flatMap(options -> optional("vespaVersion", options))
                                            .map(Version::fromString);

        ensureApplicationExists(TenantAndApplicationId.from(id), request);

        boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions"))
                                 .map(json -> SlimeUtils.jsonToSlime(json).get())
                                 .flatMap(options -> optional("dryRun", options))
                                 .map(Boolean::valueOf)
                                 .orElse(false);

        controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
        RunId runId = controller.jobController().last(id, type).get().id();
        Slime slime = new Slime();
        Cursor rootObject = slime.setObject();
        rootObject.setString("message", "Deployment started in " + runId +
                                        ". This may take about 15 minutes the first time.");
        rootObject.setLong("run", runId.number());
        return new SlimeJsonResponse(slime);
    }

    /** Deploys a system application (with package) at the current system version. Continues on the next chunk line. */
    private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        Map<String, byte[]> dataParts = parseDataParts(request);
        if ( !
// Tail of deploySystemApplication(...): validates request, then deploys at the system version.
             dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

        Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
        if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) {
            return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
        }
        // Callers may not pick a version: system applications always deploy at the system version.
        String vespaVersion = deployOptions.field("vespaVersion").asString();
        if ( ! vespaVersion.isEmpty()) {
            return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
        }
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        DeploymentResult result = controller.applications()
                                            .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber());
        Cursor logArray = root.setArray("prepareMessages");
        for (LogEntry logMessage : result.log()) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.epochMillis());
            logObject.setString("level", logMessage.level().getName());
            logObject.setString("message", logMessage.message());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Deletes a tenant; the "forget" flag (operator-only) removes it permanently. */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        boolean forget = request.getBooleanProperty("forget");
        if (forget && ! isOperator(request))
            return ErrorResponse.forbidden("Only operators can forget a tenant");
        controller.tenants().delete(TenantName.from(tenantName),
                                    Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest())),
                                    forget);
        return new MessageResponse("Deleted tenant " + tenantName);
    }

    /** Deletes an application and all its instances. */
    private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
        return new MessageResponse("Deleted application " + id);
    }

    /** Deletes an instance; if it was the last one, the application itself is deleted too. */
    private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        controller.applications().deleteInstance(id.instance(instanceName));
        if (controller.applications().requireApplication(id).instances().isEmpty()) {
            Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
            controller.applications().deleteApplication(id, credentials);
        }
        return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
    }

    /** Deactivates a deployment, aborting any still-running deployment job for it. */
    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                           requireZone(environment, region));
        controller.applications().deactivate(id.applicationId(), id.zoneId());
        controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
                  .filter(run -> ! run.hasEnded())
                  .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
        return new MessageResponse("Deactivated " + id);
    }

    /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
    private HttpResponse testConfig(ApplicationId id, JobType type) {
        Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
        // Fall back to the default instance's production deployments when the requested
        // instance is not declared in the deployment spec.
        ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                       ? id : TenantAndApplicationId.from(id).defaultInstance();
        HashSet<DeploymentId> deployments = controller.applications()
                                                      .getInstance(prodInstanceId).stream()
                                                      .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                      .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                      .collect(Collectors.toCollection(HashSet::new));
        ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
        if ( ! type.isProduction())
            deployments.add(new DeploymentId(toTest, type.zone()));
        Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
        if (deployment == null)
            throw new NotExistsException(toTest + " is not deployed in " + type.zone());
        return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                      type,
                                                                      false,
                                                                      deployment.version(),
                                                                      deployment.revision(),
                                                                      deployment.at(),
                                                                      controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                      controller.applications().reachableContentClustersByZone(deployments)));
    }

    /**
     * Requests a service dump on a node by writing a "serviceDump" report to the node repository.
     * Rejects a new request while one is in progress unless "force" is set; with "wait" set,
     * blocks until the dump completes or fails.
     */
    private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                            String region, String hostname, HttpRequest request) {
        NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
        ZoneId zone = requireZone(environment, region);

        // A report with neither failedAt nor completedAt set means a dump is still in progress.
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
        if (report != null) {
            Cursor cursor = report.get();
            boolean force = request.getBooleanProperty("force");
            if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
                throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
            }
        }

        Slime requestPayload;
        try {
            requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
        } catch (Exception e) {
            throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
        }
        Cursor requestPayloadCursor = requestPayload.get();
        String configId = requestPayloadCursor.field("configId").asString();
        long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
        if (configId.isEmpty()) {
            throw new IllegalArgumentException("Missing configId");
        }
        Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
        int artifactEntries = artifactsCursor.entries();
        if (artifactEntries == 0) {
            throw new IllegalArgumentException("Missing or empty 'artifacts'");
        }

        // Build the dump request that will be stored as the node's "serviceDump" report.
        Slime dumpRequest = new Slime();
        Cursor dumpRequestCursor = dumpRequest.setObject();
        dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
        dumpRequestCursor.setString("configId", configId);
        Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
        for (int i = 0; i < artifactEntries; i++) {
            dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
        }
        if (expiresAt > 0) {
            dumpRequestCursor.setLong("expiresAt", expiresAt);
        }
        Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
        if (dumpOptionsCursor.children() > 0) {
            SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
        }
        // NOTE(review): new String(byte[]) uses the platform default charset pre-JDK 18 —
        // consider StandardCharsets.UTF_8 to match the JSON bytes; confirm before changing.
        var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
        nodeRepository.updateReports(zone, hostname, reportsUpdate);
        boolean wait = request.getBooleanProperty("wait");
        if (!wait) return new MessageResponse("Request created");
        return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
    }

    /** Returns the current service dump report for a node, or 404 if none exists. */
    private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
        NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
        ZoneId zone = requireZone(environment, region);
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
                .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
        return new SlimeJsonResponse(report);
    }

    // Polls the node's service dump report until it completes or fails.
    // NOTE: this method continues past the end of this chunk; the remainder is unchanged.
    private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                                  String application, String instance, String hostname) {
        int pollInterval = 2;
        Slime report;
        while (true) {
            report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
            Cursor cursor = report.get();
            if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
                break;
            }
            final
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
As far as I know, my suggestion is a no-op, because numDropped > 0 implies numInitial > 0 — otherwise all nodes would already have been readied. But the rewritten condition reads more logically to me.
/**
 * Returns the status of a "drop documents" operation for the content nodes of a manually
 * deployed application, optionally restricted to a single content cluster.
 *
 * Each node advances through phases recorded in its "dropDocuments" report:
 * no report yet -> initial -> dropped -> readied -> started (report removed once fully done).
 *
 * @param tenant      tenant name
 * @param application application name
 * @param instance    instance name
 * @param environment environment name; must be a manually deployed environment (dev/perf)
 * @param region      region name
 * @param clusterId   optional cluster to restrict the status query to
 * @return progress JSON, or a conflict response if node phases are inconsistent
 * @throws IllegalArgumentException if the zone is not manually deployed
 * @throws NotExistsException       if no matching content nodes are found
 */
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment, String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId zone = ZoneId.from(environment, region);
    if (!zone.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");

    ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
    NodeFilter filters = NodeFilter.all()
            .states(Node.State.active)
            .applications(applicationId)
            .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, clusterId.map(filters::clusterIds).orElse(filters));
    if (nodes.isEmpty()) {
        throw new NotExistsException("No content nodes found for %s%s in %s".formatted(
                applicationId.toFullString(), clusterId.map(id -> " cluster " + id).orElse(""), zone));
    }

    // Tally nodes by phase, decoded from each node's "dropDocuments" report. The order of the
    // checks matters: a report with "startedAt" also has "readiedAt" and "droppedAt", so the
    // most advanced timestamp must be tested first.
    Instant readiedAt = null;
    int numNoReport = 0, numInitial = 0, numDropped = 0, numReadied = 0, numStarted = 0;
    for (Node node : nodes) {
        Inspector report = Optional.ofNullable(node.reports().get("dropDocuments"))
                .map(json -> SlimeUtils.jsonToSlime(json).get()).orElse(null);
        if (report == null) numNoReport++;
        else if (report.field("startedAt").valid()) {
            numStarted++;
            readiedAt = SlimeUtils.instant(report.field("readiedAt"));
        }
        else if (report.field("readiedAt").valid()) numReadied++;
        else if (report.field("droppedAt").valid()) numDropped++;
        else numInitial++;
    }

    // Nodes still in the dropping phases (initial/dropped) must not coexist with nodes that have
    // already progressed past them (no report, readied, or started): such a mix indicates the
    // previous drop run raced a topology change and may not have cleared all documents.
    if (numInitial + numDropped > 0 && numNoReport + numReadied + numStarted > 0)
        return ErrorResponse.conflict("Last dropping of documents may have failed to clear all documents due " +
                "to concurrent topology changes, consider retrying");

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (numStarted + numNoReport == nodes.size()) {
        // All nodes are done; report when documents were last dropped, if any node still knows.
        if (readiedAt != null) root.setLong("lastDropped", readiedAt.toEpochMilli());
    } else {
        Cursor progress = root.setObject("progress");
        progress.setLong("total", nodes.size());
        progress.setLong("dropped", numDropped);
        progress.setLong("started", numStarted + numNoReport);
    }
    return new SlimeJsonResponse(slime);
}
if ((numInitial > 0 && numNoReport > 0) ||
/**
 * Returns the status of a "drop documents" operation for the content nodes of a manually
 * deployed application, optionally restricted to a single content cluster.
 *
 * Each node advances through phases recorded in its "dropDocuments" report:
 * no report yet -> initial -> dropped -> readied -> started (report removed once fully done).
 *
 * @param tenant      tenant name
 * @param application application name
 * @param instance    instance name
 * @param environment environment name; must be a manually deployed environment
 * @param region      region name
 * @param clusterId   optional cluster to restrict the status query to
 * @return progress JSON, or a conflict response if node phases are inconsistent
 * @throws IllegalArgumentException if the zone is not manually deployed
 * @throws NotExistsException       if no matching content nodes are found
 */
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment, String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId zone = ZoneId.from(environment, region);
    if (!zone.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");
    ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
    // Only active content (or combined) nodes of this application hold documents.
    NodeFilter filters = NodeFilter.all()
            .states(Node.State.active)
            .applications(applicationId)
            .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, clusterId.map(filters::clusterIds).orElse(filters));
    if (nodes.isEmpty()) {
        throw new NotExistsException("No content nodes found for %s%s in %s".formatted(
                applicationId.toFullString(), clusterId.map(id -> " cluster " + id).orElse(""), zone));
    }
    // Tally nodes by phase. Check order matters: a report containing "startedAt" also carries
    // earlier timestamps, so the most advanced phase must be tested first.
    Instant readiedAt = null;
    int numNoReport = 0, numInitial = 0, numDropped = 0, numReadied = 0, numStarted = 0;
    for (Node node : nodes) {
        Inspector report = Optional.ofNullable(node.reports().get("dropDocuments"))
                .map(json -> SlimeUtils.jsonToSlime(json).get()).orElse(null);
        if (report == null) numNoReport++;
        else if (report.field("startedAt").valid()) {
            numStarted++;
            readiedAt = SlimeUtils.instant(report.field("readiedAt"));
        }
        else if (report.field("readiedAt").valid()) numReadied++;
        else if (report.field("droppedAt").valid()) numDropped++;
        else numInitial++;
    }
    // Nodes still in the dropping phases (initial/dropped) coexisting with nodes past them
    // (no report, readied, or started) means the previous run raced a topology change and may
    // not have cleared all documents.
    if (numInitial + numDropped > 0 && numNoReport + numReadied + numStarted > 0)
        return ErrorResponse.conflict("Last dropping of documents may have failed to clear all documents due " +
                "to concurrent topology changes, consider retrying");
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (numStarted + numNoReport == nodes.size()) {
        // All nodes are done; report when documents were last dropped, if a node still knows.
        if (readiedAt != null) root.setLong("lastDropped", readiedAt.toEpochMilli());
    } else {
        Cursor progress =
                root.setObject("progress");
        progress.setLong("total", nodes.size());
        progress.setLong("dropped", numDropped);
        progress.setLong("started", numStarted + numNoReport);
    }
    return new SlimeJsonResponse(slime);
}
// Handler for the /application/v4 REST API: tenant, application, instance and deployment
// operations. Every request is audit-logged via the AuditLoggingRequestHandler superclass.
class ApplicationApiHandler extends AuditLoggingRequestHandler {

    private static final ObjectMapper jsonMapper = new ObjectMapper();

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    // Generous timeout — some requests handled here (e.g. deployments) are long-running.
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    // Entry point: dispatches on HTTP method and translates known exception types into
    // the corresponding HTTP error responses.
    @Override
    public HttpResponse auditAndHandle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri());
            return switch (request.getMethod()) {
                case GET: yield handleGET(path, request);
                case PUT: yield handlePUT(path, request);
                case POST: yield handlePOST(path, request);
                case PATCH: yield handlePATCH(path, request);
                case DELETE: yield handleDELETE(path, request);
                case OPTIONS: yield handleOPTIONS();
                default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            };
        }
        catch (RestApiException.Forbidden e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (RestApiException.Unauthorized e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Map config server error codes to HTTP statuses; unrecognized codes become 400.
            return switch (e.code()) {
                case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
                default -> new
ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), 
request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), 
Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), 
path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return 
ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return 
addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return 
suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), 
path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), 
path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), 
// Tail of the preceding route-dispatch method: remaining path patterns for this
// HTTP verb, ending in a 404 fallback when no pattern matches.
path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend"))
            return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override"))
            return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support"))
            return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}"))
            return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
            return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // CORS/preflight response advertising the verbs this handler accepts.
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    // Lists every tenant with its applications nested inline (recursive root listing).
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications = controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            toSlime(tenantArray.addObject(),
                    tenant,
                    applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(),
                    request);
        return new SlimeJsonResponse(slime);
    }

    // Root resource: recurse into tenants when requested, otherwise list child resources.
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
                ? recursiveRoot(request)
                : new ResourceResponse(request, "tenant");
    }

    // Flat list of all tenants, optionally including deleted ones.
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    // Single-tenant lookup; 404 when the tenant does not exist.
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    // Serializes one tenant together with its applications.
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    // Returns pending ssh-access requests plus the audit log for a cloud tenant.
    // A 404 from ZMS is treated as "access control not yet configured" and
    // reported as managedAccess=false rather than an error.
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var slime = new Slime();
        var cursor = slime.setObject();
        try {
            var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
            var managedAccess = accessControlService.getManagedAccess(tenant);
            cursor.setBool("managedAccess", managedAccess);
            accessRoleInformation.getPendingRequest()
                    .ifPresent(membershipRequest -> {
                        var requestCursor = cursor.setObject("pendingRequest");
                        requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                        requestCursor.setString("reason", membershipRequest.getReason());
                    });
            var auditLogCursor = cursor.setArray("auditLog");
            accessRoleInformation.getAuditLog()
                    .forEach(auditLogEntry -> {
                        var entryCursor = auditLogCursor.addObject();
                        entryCursor.setString("created", auditLogEntry.getCreationTime());
                        entryCursor.setString("approver", auditLogEntry.getApprover());
                        entryCursor.setString("reason", auditLogEntry.getReason());
                        entryCursor.setString("status", auditLogEntry.getAction());
                    });
        } catch (ZmsClientException e) {
            if (e.getErrorCode() == 404)
                cursor.setBool("managedAccess", false);
        }
        return new SlimeJsonResponse(slime);
    }

    // Operator-only: requests temporary ssh access for a cloud tenant.
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    // Approves or rejects a pending ssh-access request; expiry defaults to 24h
    // when absent from the request body. (Method continues on the next lines.)
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); try { controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes"); throw e; } } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support 
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield 
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) {
        // Merges a billing-contact update; the nested contact is treated as billing
        // (isBillingContact = true, so no email re-verification).
        if (!insp.valid()) return oldContact;
        return TenantBilling.empty()
                .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    }

    // Replaces the contact list from the request. Addresses already present keep
    // their verification state; new addresses are stored unverified and get a
    // verification mail.
    private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) {
        if (!insp.valid()) return oldContacts;
        List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
            String email = inspector.field("email").asString().trim();
            List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                    .map(audience -> fromAudience(audience.asString()))
                    .toList();
            return oldContacts.ofType(TenantContacts.EmailContact.class)
                    .stream()
                    .filter(contact -> contact.email().getEmailAddress().equals(email))
                    .findAny()
                    .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email()))
                    .orElseGet(() -> {
                        controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS);
                        return new TenantContacts.EmailContact(audiences, new Email(email, false));
                    });
        }).toList();
        return new TenantContacts(contacts);
    }

    // Lists notifications, either for one tenant or for all tenants that have any,
    // filtered by the optional request properties (application, instance, zone,
    // job, type, level).
    private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
        boolean productionOnly = showOnlyProductionInstances(request);
        boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
        Slime slime = new Slime();
        Cursor notificationsArray = slime.setObject().setArray("notifications");
        tenant.map(t -> Stream.of(TenantName.from(t)))
              .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
              .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
              .filter(notification ->
                      propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                      propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                      propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                      propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                      propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                      propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
              .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
        return new SlimeJsonResponse(slime);
    }

    // True when the request property is absent, or present and equal (after mapping) to the value.
    private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
        return Optional.ofNullable(request.getProperty(property))
                       .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get()))
                       .orElse(true);
    }

    // Serializes one notification. Message bodies can be omitted, and the tenant
    // field is only included for cross-tenant listings.
    private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
        cursor.setLong("at", notification.at().toEpochMilli());
        cursor.setString("level", notificationLevelAsString(notification.level()));
        cursor.setString("type", notificationTypeAsString(notification.type()));
        if (!excludeMessages) {
            Cursor messagesArray = cursor.setArray("messages");
            notification.messages().forEach(messagesArray::addString);
        }
        if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
        notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
        notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
        notification.source().zoneId().ifPresent(zoneId -> {
            cursor.setString("environment", zoneId.environment().value());
            cursor.setString("region", zoneId.region().value());
        });
        notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
        notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
        notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
    }

    // Wire name for a notification type; submission and applicationPackage share one name.
    private static String notificationTypeAsString(Notification.Type type) {
        return switch (type) {
            case submission, applicationPackage: yield "applicationPackage";
            case testPackage: yield "testPackage";
            case deployment: yield "deployment";
            case feedBlock: yield "feedBlock";
            case reindex: yield "reindex";
        };
    }

    // Wire name for a notification level.
    private static String notificationLevelAsString(Notification.Level level) {
        return switch (level) {
            case info: yield "info";
            case warning: yield "warning";
            case error: yield "error";
        };
    }

    // Lists all applications of a tenant, or just the named one.
    // (Method continues on the next lines.)
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        getTenantOrThrow(tenantName);
        List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
                controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                          .map(List::of)
                          .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
        Slime slime = new Slime();
        Cursor applicationArray = slime.setArray();
        for (Application application : applications) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
        return new SlimeJsonResponse(slime);
    }

    // Returns the application package last deployed by the given dev job, as a zip.
    private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
        ZoneId zone = type.zone();
        RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
        byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
        return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
    }

    // Returns the stored package diff for a dev deployment run; 404 when none exists.
    private HttpResponse devApplicationPackageDiff(RunId runId) {
        DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
        return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
                .map(ByteArrayResponse::new)
                .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
    }

    // Returns an application (or tester) package zip. 'build' selects a build
    // number, "latestDeployed" the latest production-deployed revision; the
    // default is the last submitted build.
    private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
        TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
        final long build;
        String requestedBuild = request.getProperty("build");
        if (requestedBuild != null) {
            if (requestedBuild.equals("latestDeployed")) {
                build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                                  .map(RevisionId::number)
                                  .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
            } else {
                try {
                    build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L);
                } catch (NumberFormatException e) {
                    throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
                }
            }
        } else {
            build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                              .map(version -> version.id().number())
                              .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
        }
        RevisionId revision = RevisionId.forProduction(build);
        boolean tests = request.getBooleanProperty("tests");
        byte[] applicationPackage = tests ?
                controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
                controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
        String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
        return new ZipResponse(filename, applicationPackage);
    }

    // Returns the stored package diff for a submitted build; 404 when none exists.
    private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
        TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
        return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
                .map(ByteArrayResponse::new)
                .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
    }

    // Serializes a single application.
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    // Returns the compile version for the application, optionally pinned to a major version.
    private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
        Slime slime = new Slime();
        OptionalInt allowMajor = OptionalInt.empty();
        if (allowMajorParam != null) {
            try {
                allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
            }
        }
        Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
        slime.setObject().setString("compileVersion", compileVersion.toFullString());
        return new SlimeJsonResponse(slime);
    }

    // Serializes a single instance with its deployment status.
    // (Method continues on the next lines.)
    private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Slime slime = new
Slime();
        toSlime(slime.setObject(),
                getInstance(tenantName, applicationName, instanceName),
                controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)),
                request);
        return new SlimeJsonResponse(slime);
    }

    // Registers a developer public key for the authenticated user on a cloud tenant
    // and returns the resulting key set.
    private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    // Asks the config server to validate that a configured secret store is
    // readable from the given deployment, and proxies the JSON result.
    private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
        var awsRegion = request.getProperty("aws-region");
        var parameterName = request.getProperty("parameter-name");
        var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
        if (!applicationId.tenant().equals(TenantName.from(tenantName)))
            return ErrorResponse.badRequest("Invalid application id");
        var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
        var deploymentId = new DeploymentId(applicationId, zoneId);
        var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
        var tenantSecretStore = tenant.tenantSecretStores()
                .stream()
                .filter(secretStore -> secretStore.getName().equals(secretStoreName))
                .findFirst();
        if (tenantSecretStore.isEmpty())
            return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
        var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
        try {
            var responseRoot = new Slime();
            var responseCursor = responseRoot.setObject();
            responseCursor.setString("target", deploymentId.toString());
            var responseResultCursor = responseCursor.setObject("result");
            var responseSlime = SlimeUtils.jsonToSlime(response);
            SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
            return new SlimeJsonResponse(responseRoot);
        } catch (JsonParseException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    // Removes a developer key (looked up by its PEM encoding) from a cloud tenant
    // and returns the resulting key set.
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        // NOTE(review): 'user' is looked up but never used below — candidate for removal; confirm no side effect is intended.
        Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    // Serializes a key -> principal map as PEM keys with user names.
    private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    // Adds a deploy key to the application and returns the resulting key set.
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    // Removes a deploy key from the application and returns the resulting key set.
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    // Registers a tenant secret store (cloud tenants only), creating the IAM
    // policy first. (Method continues on the next lines.)
    private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        var data = toSlime(request.getData()).get();
        var awsId = mandatory("awsId", data).asString();
        var externalId = mandatory("externalId", data).asString();
        var role = mandatory("role",
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); } private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); } private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + 
tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}

// Removes the GCP archive access member from a cloud tenant.
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}

// Patches application-level fields ("majorVersion", "pemDeployKey") from the request body
// and reports which changes were applied. A majorVersion of 0 clears the pinned major.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}

// Looks up an application, or throws NotExistsException.
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

// Looks up an application instance, or throws NotExistsException.
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

// Lists the node repository nodes allocated to the given deployment, serialized as JSON.
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // "restarting"/"rebooting" mean a wanted generation is ahead of the current one.
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}

// Serializes per-cluster autoscaling state (min/max/current/target/suggested resources and scaling events).
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        if ( ! cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        toSlime(cluster.target(), clusterObject.setObject("target"));
        toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
    }
    return new SlimeJsonResponse(slime);
}

// Maps a node state to its wire-format string; throws on states this API does not expose.
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed: yield "failed";
        case parked: yield "parked";
        case dirty: yield "dirty";
        case ready: yield "ready";
        case active: yield "active";
        case inactive: yield "inactive";
        case reserved: yield "reserved";
        case provisioned: yield "provisioned";
        case breakfixed: yield "breakfixed";
        case deprovisioned: yield "deprovisioned";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}

// Maps an orchestration service state to its wire-format string; unknown falls through to "unknown".
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: return "expectedUp";
        case allowedDown: return "allowedDown";
        case permanentlyDown: return "permanentlyDown";
        case unorchestrated: return "unorchestrated";
        case unknown: break;
    }
    return "unknown";
}

// Maps a cluster type to its wire-format string; "unknown" is not serializable here.
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin: yield "admin";
        case content: yield "content";
        case container: yield "container";
        case combined: yield "combined";
        case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}

// Maps a disk speed to its wire-format string.
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast : yield "fast";
        case slow : yield "slow";
        case any : yield "any";
    };
}

// Maps a storage type to its wire-format string.
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote : yield "remote";
        case local : yield "local";
        case any : yield "any";
    };
}

// Streams deployment logs from the config server straight to the client.
private HttpResponse logs(String tenantName, String
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // Close the upstream log stream when done, even if rendering fails.
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
        @Override
        public long maxPendingBytes() {
            return 1 << 26; // 64 MiB of buffered log data before back-pressure
        }
    };
}

// Returns the current support access state for a deployment.
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}

// Grants support access to a deployment for 7 days, recorded against the requesting principal.
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}

// Revokes support access for a deployment and re-triggers it so the change takes effect.
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}

// Fetches search node (proton) metrics for a deployment and serializes them as JSON.
private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment);
    return buildResponseFromSearchNodeMetrics(searchNodeMetrics);
}

// Returns scaling events for a deployment within an optional [from, until] epoch-second window,
// grouped per cluster id. Defaults: from=EPOCH, until=now.
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var from = Optional.ofNullable(request.getProperty("from"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.EPOCH);
    var until = Optional.ofNullable(request.getProperty("until"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.now(controller.clock()));
    var application = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    var deployment = new DeploymentId(application, zone);
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    var slime = new Slime();
    var root = slime.setObject();
    for (var entry : events.entrySet()) {
        var serviceRoot = root.setArray(entry.getKey().clusterId().value());
        scalingEventsToSlime(entry.getValue(), serviceRoot);
    }
    return new SlimeJsonResponse(slime);
}

// Wraps search node metrics in a {"metrics": [...]} JSON envelope; 500 on serialization failure.
private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
    try {
        var jsonObject = jsonMapper.createObjectNode();
        var jsonArray = jsonMapper.createArrayNode();
        for (SearchNodeMetrics metrics : searchnodeMetrics) {
            jsonArray.add(metrics.toJson());
        }
        jsonObject.set("metrics", jsonArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}

// Force-triggers (or re-triggers) a job for an instance. Request flags: skipTests, reTrigger,
// skipRevision, skipUpgrade. The response message lists what was triggered and any suppressed upgrades.
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    String triggered = reTrigger
            ? controller.applications().deploymentTrigger()
                        .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
            : controller.applications().deploymentTrigger()
                        .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                        .stream().map(job -> job.type().jobName()).collect(joining(", "));

    // Builds e.g. ", without revision and platform upgrade" depending on which upgrades were skipped.
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}

// Pauses the given job for the maximum allowed pause duration.
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}

// Resumes a previously paused job.
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}

// Re-sends a pending verification mail of the given type ("contact" or "notifications") for a tenant.
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
    var mail = mandatory("mail", inspector).asString();
    var type = mandatory("mailType", inspector).asString();
    var mailType = switch (type) {
        case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT;
        case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown mail type " + type);
    };
    var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType);
    return pendingVerification.isPresent() ?
new MessageResponse("Re-sent verification mail to " + mail) :
           ErrorResponse.notFoundError("No pending mail verification found for " + mail);
}

// Serializes an application (deployments link, latest version, changes, instances, keys, metrics,
// activity and ownership metadata) into the given Slime object.
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/", request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // Deployment change and outstanding change are reported from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

// Serializes one instance (changes, change blockers, rotation id and deployments) into the given object.
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());

    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is computed but apparently unused in this branch — confirm.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());

        // Change blockers from the deployment spec: which change kinds are blocked, in which time windows.
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    addRotationId(object, instance);

    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();

        // Only prod deployments of instances with rotations get endpoint status.
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);

        if (recurseOverDeployments(request)) // List full deployment information when recursing.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            addAvailabilityZone(deploymentObject, deployment.zone());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}

// Sets "rotationId" from the instance's first assigned rotation, if any.
private void addRotationId(Cursor object, Instance instance) {
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}

// Serializes an instance with its application context (version info, changes, change blockers,
// deployments and application metadata) into the given object.
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());

    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });

    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus is computed but apparently unused in this branch — confirm.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

        if ( !
instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

        // Change blockers from the deployment spec: which change kinds are blocked, in which time windows.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    addRotationId(object, instance);

    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                              .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                              .orElse(List.copyOf(instance.deployments().values()));

    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();

        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }

        if (recurseOverDeployments(request)) // List full deployment information when recursing.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            addAvailabilityZone(deploymentObject, deployment.zone());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Also list zones this instance is expected to deploy to (or is actively deploying to manually)
    // but has no deployment in yet, with only environment and region.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    // Both the legacy single-key field and the full list are emitted.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

// Serializes a single deployment of an instance; 404 if the instance or the deployment does not exist.
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));

    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));

    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}

// Serializes a change: target platform version and/or target revision.
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision)));
}

// Serializes one endpoint: cluster, TLS, URL, scope, routing method and legacy flag.
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}

// Serializes full deployment details: identity, endpoints, versions, timing, enclave info,
// status, quota/cost, archive URI, activity and metrics.
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    addAvailabilityZone(response, deployment.zone());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints =
// --- continuation of the per-deployment serializer (method starts earlier in the file) ---
zoneEndpoints.not().legacy().direct(); }
for (var endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); }
// Declared (application-level) endpoints targeting this deployment; same legacy/direct filtering.
EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId);
if (!legacyEndpoints) {
    declaredEndpoints = declaredEndpoints.not().legacy().direct();
}
for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); }
// Links to related resources for this deployment.
response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/",
                                             "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(),
                                             request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
// What is currently deployed: platform version, revision id and build number.
response.setString("version", deployment.version().toFullString());
response.setString("revision", application.revisions().get(deployment.revision()).stringId());
response.setLong("build", deployment.revision().number());
Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
// Expiry is only present in zones that have a deployment time-to-live configured.
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
          .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
// Enclave info when the deployment runs in the tenant's own cloud account.
// NOTE(review): key "athensDomain" looks like a legacy spelling of Athenz — do not change, clients may depend on it.
controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
    Cursor enclave = response.setObject("enclave");
    enclave.setString("cloudAccount", cloudAccount.value());
    controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain ->
            enclave.setString("athensDomain", domain.value()));
});
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
    // Rotation status only applies to prod deployments of instances with assigned rotations.
    if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
        toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
    if (!deployment.zone().environment().isManuallyDeployed()) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
        Optional.ofNullable(status.jobSteps().get(jobId))
                .ifPresent(stepStatus -> {
                    JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                    // complete: nothing left to run; pending: not yet ready to run; running: in progress.
                    if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete");
                    else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant())) response.setString("status", "pending");
                    else response.setString("status", "running");
                });
    } else {
        // Manually deployed environments: derive status from the last deployment run, if any.
        var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
        deploymentRun.ifPresent(run -> {
            response.setString("status", run.hasEnded() ? "complete" : "running");
        });
    }
}
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
// Enclave deployments archive per cloud account; all others archive per tenant.
(controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ?
        controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) :
        controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
        .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
// Recent feed/query activity for the deployment.
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
// Aggregated deployment metrics.
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}

/** Serializes the BCP rotation state of a deployment. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}

/** Serializes global endpoint (rotation) status for each assigned rotation of a deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var array = object.setArray("endpointStatus");
    for (var rotation : rotations) {
        var statusObject = array.addObject();
        var targets = status.of(rotation.rotationId());
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        // (continuation of toSlime(List<AssignedRotation>, ...): remaining per-rotation fields)
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}

/** Returns the monitoring dashboard URI for the given deployment. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}

/** Sets a deployment in or out of service in its global rotation. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    // Record who made the change: operators and tenants are distinct agents.
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(deploymentId).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ? "in" : "out of"));
}

/** Maps the cloud of the given deployment's zone to its private-endpoint service type name. */
private String serviceTypeIn(DeploymentId id) {
    CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName();
    if (CloudName.AWS.equals(cloud)) return "aws-private-link";
    if (CloudName.GCP.equals(cloud)) return "gcp-service-connect";
    return "unknown";
}

/** Lists private endpoint services, their allowed URNs and endpoint connection states for a deployment. */
private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));
    List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId());
    Slime slime = new Slime();
    Cursor lbArray = slime.setObject().setArray("privateServices");
    for (LoadBalancer lb : lbs) {
        Cursor serviceObject = lbArray.addObject();
        serviceObject.setString("cluster", lb.cluster().value());
        // Only load balancers with a private service get the full service description.
        lb.service().ifPresent(service -> {
            serviceObject.setString("serviceId", service.id());
            serviceObject.setString("type", serviceTypeIn(id));
            Cursor urnsArray = serviceObject.setArray("allowedUrns");
            for (AllowedUrn urn : service.allowedUrns()) {
                Cursor urnObject = urnsArray.addObject();
                urnObject.setString("type", switch (urn.type()) {
                    case awsPrivateLink -> "aws-private-link";
                    case gcpServiceConnect -> "gcp-service-connect";
                });
                urnObject.setString("urn", urn.urn());
            }
            // Current endpoint connections as seen by the VPC endpoint service.
            Cursor endpointsArray = serviceObject.setArray("endpoints");
            controller.serviceRegistry().vpcEndpointService()
                      .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount())
                      .forEach(endpoint -> {
                          Cursor endpointObject = endpointsArray.addObject();
                          endpointObject.setString("endpointId", endpoint.endpointId());
                          endpointObject.setString("state", endpoint.stateValue().name());
                          endpointObject.setString("detail", endpoint.stateString());
                      });
        });
    }
    return new SlimeJsonResponse(slime);
}

// Triggers document removal for a manually deployed application (parameter list continues on the next line).
private HttpResponse dropDocuments(String tenant, String application, String instance, String
environment, String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId zone = ZoneId.from(environment, region);
    // Dropping documents is destructive and therefore restricted to manually deployed environments.
    if (!zone.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");
    ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
    controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId);
    return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() +
                               clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone);
}

/** Returns the global rotation override status for the primary declared endpoint requiring rotation. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                   .requiresRotation()
                                                   .primary();
    if (primaryEndpoint.isPresent()) {
        DeploymentRoutingContext context = controller.routing().of(deploymentId);
        RoutingStatus status = context.routingStatus();
        array.addString(primaryEndpoint.get().upstreamName(deploymentId));
        Cursor statusObject = array.addObject();
        statusObject.setString("status", status.value().name());
        statusObject.setString("reason", ""); // always empty here; field kept for response-format compatibility
        statusObject.setString("agent", status.agent().name());
        statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns the rotation status of the given deployment, for the rotation selected by endpointId. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    toSlime(instance.rotationStatus().of(rotation, deployment), response);
    return new SlimeJsonResponse(slime);
}

/** Returns the change (platform/application) currently being rolled out for the instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! instance.change().isEmpty()) {
        instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
        // "pinned" duplicates "platform-pinned" — presumably kept for older clients; verify before removing.
        root.setBool("pinned", instance.change().isPlatformPinned());
        root.setBool("platform-pinned", instance.change().isPlatformPinned());
        root.setBool("application-pinned", instance.change().isRevisionPinned());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns whether the given deployment is currently suspended by the orchestrator. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}

// Proxies a service's /status page through the config server (parameter list continues on the next line).
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath,
HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); } private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { 
getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), new Email(user.email(), true))); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId 
applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    // Implicitly create the application on first instance creation.
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    controller.applications().createInstance(applicationId.instance(instanceName));
    Slime slime = new Slime();
    toSlime(applicationId.instance(instanceName), slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version means "the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Only operators may force a version which is not active in this system.
        if ( ! versionStatus.isActive(version) && ! isOperator(request))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPlatformPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1; // -1 means "latest known revision"
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                          : getRevision(application.get(), build);
        Change change = Change.of(revision);
        if (pin)
            change = change.withRevisionPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Resolves the revision with the given build number, requiring that its package is still stored. */
private RevisionId getRevision(Application application, long build) {
    return application.revisions().withPackage().stream()
                      .map(ApplicationVersion::id)
                      .filter(version -> version.number() == build)
                      .findFirst()
                      .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build))
                      .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}

/** Marks a build as non-deployable, and cancels any ongoing change to it (continues on the next line). */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        for (Instance instance : application.get().instances().values())
            if (instance.change().revision().equals(Optional.of(revision)))
controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! 
type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName()); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone. */ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = 
readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); } void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); status.cause().ifPresent(cause -> statusObject.setString("cause", cause)); } private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; } /** Enables reindexing of an application in a zone. */ private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); } /** Disables reindexing of an application in a zone. 
*/ private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().disableReindexing(id, zone); return new MessageResponse("Disabled reindexing of " + id + " in " + zone); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); RestartFilter restartFilter = new RestartFilter() .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of)) .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from)) .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); controller.applications().restart(deploymentId, restartFilter); return new MessageResponse("Requested restart of " + deploymentId); } /** Set suspension status of the given deployment. */ private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().setSuspension(deploymentId, suspend); return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId); } private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { if ( ! type.environment().isManuallyDeployed() && ! 
isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Check and read the package part with the same key, so the check can never pass while the read fails.
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // Parse the optional 'deployOptions' JSON part once, then read both options from it.
    var deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);
    ensureApplicationExists(TenantAndApplicationId.from(id), request);
    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);
    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}

/** Deploys a system application (e.g. a config server application) on the current system version. */
private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }
    // System applications always deploy on the system version; an explicit version is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if ( ! vespaVersion.isEmpty()) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    DeploymentResult result = controller.applications()
                                        .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber());
    Cursor logArray = root.setArray("prepareMessages");
    for (LogEntry logMessage : result.log()) {
        Cursor logObject = logArray.addObject();
        logObject.setLong("time", logMessage.epochMillis());
        logObject.setString("level", logMessage.level().getName());
        logObject.setString("message", logMessage.message());
    }
    return new SlimeJsonResponse(slime);
}

/** Deletes a tenant; only operators may also "forget" (hard-delete) it (continues on the next line). */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && !
isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    controller.tenants().delete(TenantName.from(tenantName),
                                Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
                                                                              toSlime(request.getData()).get(),
                                                                              request.getJDiscRequest())),
                                forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}

/** Deletes an application and all its instances, authorized by the request credentials. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}

/** Deletes an instance; also deletes the application when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}

/** Deactivates a deployment, aborting any deployment run still in progress for it. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}

/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance's production deployments when the given instance is not declared.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    if ( !
type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime 
dumpRequest = new Slime(); // the report written to the node, consumed by the service dump agent
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    // NOTE(review): new String(byte[]) uses the platform default charset — presumably UTF-8 is intended; verify.
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}

/** Returns the current service dump report for the given node, or 404 if none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region,
                                    String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
            .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
    return new SlimeJsonResponse(report);
}

/** Polls the node's service dump report until it is marked completed or failed, then returns it. */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application,
                                              String instance, String hostname) {
    int pollInterval = 2; // seconds between polls
    Slime report;
    while (true) {
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report; // effectively-final copy for capture by the logging lambda
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}

/** Reads the "serviceDump" report of the given node, verifying that the node is owned by the given application. */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application,
                                  String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId app = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if (!app.equals(owner)) {
        throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
    }
    String json = node.reports().get("serviceDump");
    if (json == null) return Optional.empty();
    return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}

/** Parses a source revision from JSON; all of "repository", "branch" and "commit" are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(),
                              object.field("commit").asString());
}

private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
            .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}

/** Renders a tenant, including its applications, to Slime. */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            // Quota lookup is best-effort: failures are logged but must not break tenant rendering.
            try {
                var usedQuota = applications.stream()
                        .map(Application::quotaUsage)
                        .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(object.setObject("quota"), usedQuota);
            } catch (Exception e) {
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
            break;
        }
        case deleted:
            break;
        default:
            throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null; // computed lazily, only when recursing over applications
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();
        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);
        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}

private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role));
    archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member));
}

private void toSlime(Cursor object, QuotaUsage usage) {
    object.setDouble("budgetUsed", usage.rate());
}

private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system());
    object.setDouble("cost", cost);
}

private void toSlime(IntRange range, Cursor object) {
    range.from().ifPresent(from -> object.setLong("from", from));
    range.to().ifPresent(to -> object.setLong("to", to));
}

private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) {
    autoscalingObject.setString("status", autoscaling.status());
    autoscalingObject.setString("description", autoscaling.description());
    autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources")));
    autoscalingObject.setLong("at", autoscaling.at().toEpochMilli());
    toSlime(autoscaling.peak(),
autoscalingObject.setObject("peak"));
    toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal"));
}

private void toSlime(Load load, Cursor loadObject) {
    loadObject.setDouble("cpu", load.cpu());
    loadObject.setDouble("memory", load.memory());
    loadObject.setDouble("disk", load.disk());
}

private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent scalingEvent : scalingEvents) {
        Cursor scalingEventObject = scalingEventsArray.addObject();
        toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
        toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
        scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
        scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli()));
    }
}

private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}

/** Renders a brief entry for the tenant list: name, type and a self-URL. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud:
            break;
        case deleted:
            break;
        default:
            throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}

/** Renders tenant activity metadata: creation time, last dev deployment, last submission and last logins. */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Last dev deployment: prefer recorded deployment starts; fall back to the start of the last dev job run.
    Optional<Instant> lastDev = applications.stream()
            .flatMap(application -> application.instances().values().stream())
            .flatMap(instance -> instance.deployments().values().stream()
                    .filter(deployment -> deployment.zone().environment() == Environment.dev)
                    .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
            .max(Comparator.naturalOrder())
            .or(() -> applications.stream()
                    .flatMap(application -> application.instances().values().stream())
                    .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
                            .filter(job -> job.environment() == Environment.dev)
                            .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                    .map(Run::start)
                    .max(Comparator.naturalOrder()));
    Optional<Instant> lastSubmission = applications.stream()
            .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
            .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    if (tenant.type() == Tenant.Type.deleted)
        object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
          .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
          .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
    tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
          .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
}

/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) {
    return Joiner.on("/").join(elements);
}

private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value(),
                                     request.getUri()).toString());
}

private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value() +
                                     "/instance/" + id.instance().value(),
                                     request.getUri()).toString());
}

private void stringsToSlime(List<String> strings, Cursor array) {
    for (String string : strings)
        array.addString(string);
}

private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
    Cursor secretStore = object.setArray("secretStores");
    tenantSecretStores.forEach(store -> {
        toSlime(secretStore.addObject(), store);
    });
}

private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
    object.setString("tenantRole", tenantRoles.containerRole());
    var stores = object.setArray("accounts");
    tenantSecretStores.forEach(secretStore -> {
        toSlime(stores.addObject(), secretStore);
    });
}

private void toSlime(Cursor object, TenantSecretStore secretStore) {
    object.setString("name", secretStore.getName());
    object.setString("awsId", secretStore.getAwsId());
    object.setString("role", secretStore.getRole());
}

/** Reads the entire stream as a single string, or returns null when the stream is empty. */
// NOTE(review): Scanner uses the platform default charset here and is never closed — verify this is intended.
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream).useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}

private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}

private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}

private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}

private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}

private static boolean showOnlyActiveInstances(HttpRequest request) {
    return "true".equals(request.getProperty("activeInstances"));
}

private static boolean includeDeleted(HttpRequest request) {
    return "true".equals(request.getProperty("includeDeleted"));
}

private static String tenantType(Tenant tenant) {
    return switch (tenant.type()) {
        case athenz: yield "ATHENS";
        case cloud: yield "CLOUD";
        case deleted: yield "DELETED";
    };
}

private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}

private JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}

private RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}

/** Handles submission of a new application revision: application package, test package and source metadata. */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = submitOptions.field("projectId").asLong();
    projectId = projectId == 0 ?
1 : projectId; // 0 means absent in the payload; default to project id 1
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only present when all three of repository, branch and commit are given.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
            ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
            : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    Optional<String> description = optional("description", submitOptions);
    int risk = (int) submitOptions.field("risk").asLong();
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]);
    Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
    TenantName tenantName = TenantName.from(tenant);
    controller.applications().verifyPlan(tenantName);
    controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    ensureApplicationExists(id, request);
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}

/** Submits a deployment-removal package, which removes all production deployments of the application. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application),
                                                 new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                                                Optional.empty(), Optional.empty(), Optional.empty(), 0), 0);
    return new MessageResponse("All deployments removed");
}

/** Adds the cloud-native availability zone to the object; only AWS zones expose one. */
private void addAvailabilityZone(Cursor object, ZoneId zoneId) {
    ZoneApi zone = controller.zoneRegistry().get(zoneId);
    if (!zone.getCloudName().equals(CloudName.AWS)) return;
    object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone());
}

private ZoneId requireZone(String environment, String region) {
    return requireZone(ZoneId.from(environment, region));
}

/** Validates that the given zone exists in this system; the synthetic prod "controller" zone is always accepted. */
private ZoneId requireZone(ZoneId zone) {
    if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
        return zone;
    }
    if (!controller.zoneRegistry().hasZone(zone)) {
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    }
    return zone;
}

/** Parses the multipart request, verifying the X-Content-Hash header against the SHA-256 of the content when present. */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);

    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}

/** Resolves the rotation to operate on: the one with the given endpoint id, or the single rotation when no id is given. */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                .filter(r -> r.endpointId().id().equals(endpointId.get()))
                .map(AssignedRotation::rotationId)
                .findFirst()
                .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}

private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in: yield "IN";
        case out: yield "OUT";
        case unknown: yield "UNKNOWN";
    };
}

private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted: yield "weighted";
        case application: yield "application";
        case global: yield "global";
        case zone: yield "zone";
    };
}

private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive: yield "exclusive";
        case sharedLayer4: yield "sharedLayer4";
    };
}

/** Returns the request context attribute with the given name, cast to the given class, or throws if unset. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
                   .filter(cls::isInstance)
                   .map(cls::cast)
                   .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}

/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    return securityContext.roles().stream()
                          .map(Role::definition)
                          .anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}

/** Creates the application if it does not exist; only allowed in public systems or with Okta credentials. */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isEmpty()) {
        if (controller.system().isPublic() || hasOktaContext(request)) {
            log.fine("Application does not exist in public, creating: " + id);
            var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
            controller.applications().createApplication(id, credentials);
        } else {
            log.fine("Application does not exist in hosted, failing: " + id);
            throw new IllegalArgumentException("Application does not exist. Create application in Console first.");
        }
    }
}

private boolean hasOktaContext(HttpRequest request) {
    // Detection by exception: credential parsing throws when the Okta context is absent from the request.
    try {
        OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context());
        return true;
    } catch (IllegalArgumentException e) {
        return false;
    }
}

/** Returns the given deployments sorted in the order their zones are declared in the deployment spec. */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    List<ZoneId> productionZones = spec.zones().stream()
                                       .filter(z -> z.region().isPresent())
                                       .map(z -> ZoneId.from(z.environment(), z.region().get()))
                                       .toList();
    return deployments.stream()
                      .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                      .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}

}
/**
 * Handler for the /application/v4 REST API: tenants, applications, instances,
 * deployments, jobs and related resources. Dispatches on HTTP method and path,
 * and translates known domain exceptions into HTTP error responses.
 */
class ApplicationApiHandler extends AuditLoggingRequestHandler {

    private static final ObjectMapper jsonMapper = new ObjectMapper();

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    // Long timeout: some operations routed here (e.g. deployments) are slow.
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    /** Routes by HTTP method; maps exception types to matching HTTP error responses. */
    @Override
    public HttpResponse auditAndHandle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri());
            return switch (request.getMethod()) {
                case GET: yield handleGET(path, request);
                case PUT: yield handlePUT(path, request);
                case POST: yield handlePOST(path, request);
                case PATCH: yield handlePATCH(path, request);
                case DELETE: yield handleDELETE(path, request);
                case OPTIONS: yield handleOPTIONS();
                default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            };
        } catch (RestApiException.Forbidden e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        } catch (RestApiException.Unauthorized e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        } catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        } catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        } catch (ConfigServerException e) {
            // Map config server error codes onto the corresponding HTTP statuses.
            return switch (e.code()) {
                case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
                default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
            };
        } catch (RuntimeException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    /** GET dispatch: read-only resources. First match wins; the chain continues on the following source lines. */
    private HttpResponse handleGET(Path path, HttpRequest request) {
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"),
        // (continuation of the compileVersion(...) call started on the previous source line)
        request.getProperty("allowMajor"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
        // "default" here is the instance name used when none is given in the path.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(),
        Optional.ofNullable(request.getProperty("limit")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        // "from" defaults to 0 when absent; "tester" selects tester-node logs.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"),
        // (continuation of the suspended(...) call started on the previous source line)
        path.get("instance"), path.get("environment"), path.get("region"), request);
        // Proxied service endpoints on specific hosts of a deployment ({*} captures the rest of the path).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if
        // (condition of the "if" started at the end of the previous source line)
        // The routes below use the older path order .../environment/{e}/region/{r}/instance/{i}.
        (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // NOTE(review): the check below is byte-identical to the one just above and can never
        // match (first match returns) — dead code, safe to remove.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"),
        path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return
        // Fallback for handleGET: no route matched.
        ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PUT dispatch: updates of existing resources. First match wins. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        // "false" = set the override rather than clearing it (DELETE clears with "true").
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"),
        path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** POST dispatch: creation and triggering of resources. First match wins. */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        // "default" is the implied instance name; the boolean selects pinning.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return
        // (completion of the "return" started at the end of the previous source line)
        addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        // Explicit-instance variants of the deploying routes above; the boolean selects pinning.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if
        (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        // Direct deployment routes (system applications); "/deploy" is an alias of the bare route.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return
        // (completion of the "return" started at the end of the previous source line; "true" = suspend)
        suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        // Older path order .../environment/{e}/region/{r}/instance/{i} for the same operations.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"),
        path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PATCH dispatch: both routes patch the application (the instance route ignores the instance). */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** DELETE dispatch: removal of resources. First match wins; the chain continues on the following source lines. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        // "default" instance; "all" cancels every pending change.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"),
        // (continuation of the cancelDeploy(...) call started on the previous source line)
        path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
        // DELETE on .../pause resumes the paused job.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"),
        path.get("environment"), path.get("region"), request);
        // "false" = resume (clear suspension); "true" on the override = clear it.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Older path order .../environment/{e}/region/{r}/instance/{i}.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** OPTIONS: advertises the supported methods. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Lists all tenants with their applications (recursive form of the root resource). */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications =
        // (completion of the assignment started on the previous source line)
        controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Root resource: recursive tenant listing if requested, otherwise just links. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
                ? recursiveRoot(request)
                : new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants (summary form). */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Returns the named tenant, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Serializes the given tenant together with its applications. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the operator access-request state and audit log for a cloud tenant. */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var slime = new Slime();
        var cursor = slime.setObject();
        try {
            var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
            var managedAccess = accessControlService.getManagedAccess(tenant);
            cursor.setBool("managedAccess", managedAccess);
            accessRoleInformation.getPendingRequest()
                    .ifPresent(membershipRequest -> {
                        var requestCursor = cursor.setObject("pendingRequest");
                        requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                        requestCursor.setString("reason", membershipRequest.getReason());
                    });
            var auditLogCursor = cursor.setArray("auditLog");
            accessRoleInformation.getAuditLog()
                    .forEach(auditLogEntry -> {
                        var entryCursor = auditLogCursor.addObject();
                        entryCursor.setString("created", auditLogEntry.getCreationTime());
                        entryCursor.setString("approver", auditLogEntry.getApprover());
                        entryCursor.setString("reason", auditLogEntry.getReason());
                        entryCursor.setString("status", auditLogEntry.getAction());
                    });
        } catch (ZmsClientException e) {
            // 404 from ZMS: role setup not present — report unmanaged access rather than failing.
            if (e.getErrorCode() == 404)
                cursor.setBool("managedAccess", false);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Files an ssh access request for a cloud tenant; operators only. */
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    /** Approves or rejects a pending ssh access request for a cloud tenant. */
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        // Expiry defaults to one day from now when not supplied.
        var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); try { controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes"); throw e; } } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support 
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
            // Tail of validateMergedTenantInfo: a non-blank website must parse as a URL.
            mergedInfo.website().isBlank()) {
            try {
                new URL(mergedInfo.website()); // parse-only validation; the result is discarded
            } catch (MalformedURLException e) {
                throw new IllegalArgumentException("'website' needs to be a valid address");
            }
        }
    }

    /** Writes a non-empty address as an "address" object under the given cursor; no-op when empty. */
    private void toSlime(TenantAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.address());
        addressCursor.setString("postalCodeOrZip", address.code());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.region());
        addressCursor.setString("country", address.country());
    }

    /** Writes a non-empty billing contact as a "billingContact" object; no-op when empty. */
    private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("billingContact");
        addressCursor.setString("name", billingContact.contact().name());
        addressCursor.setString("email", billingContact.contact().email().getEmailAddress());
        addressCursor.setString("phone", billingContact.contact().phone());
        toSlime(billingContact.address(), addressCursor);
    }

    /** Writes all contacts as a "contacts" array; only EMAIL contacts are serializable. */
    private void toSlime(TenantContacts contacts, Cursor parentCursor) {
        Cursor contactsCursor = parentCursor.setArray("contacts");
        contacts.all().forEach(contact -> {
            Cursor contactCursor = contactsCursor.addObject();
            Cursor audiencesArray = contactCursor.setArray("audiences");
            contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
            switch (contact.type()) {
                case EMAIL:
                    var email = (TenantContacts.EmailContact) contact;
                    contactCursor.setString("email", email.email().getEmailAddress());
                    contactCursor.setBool("emailVerified", email.email().isVerified());
                    return; // returns from the lambda only, i.e. proceeds with the next contact
                default:
                    throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
            }
        });
    }

    /** Maps a wire-format audience string to the enum; throws on unknown values. */
    private static TenantContacts.Audience fromAudience(String value) {
        return switch (value) {
            case "tenant": yield TenantContacts.Audience.TENANT;
            case "notifications": yield
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true)) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); return oldContacts.ofType(TenantContacts.EmailContact.class) .stream() .filter(contact -> contact.email().getEmailAddress().equals(email)) .findAny() .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email())) .orElseGet(() -> { controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS); return new TenantContacts.EmailContact(audiences, new Email(email, false)); }); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, 
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var 
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private void toSlime(Cursor keysArray, Map<PublicKey, ? 
                         // continuation of the toSlime(Cursor, Map<PublicKey, ? extends Principal>) signature begun above
                         extends Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    /** Adds the PEM deploy key from the request body to the application and returns the resulting key set. */
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes the PEM deploy key from the request body from the application and returns the remaining key set. */
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /**
     * Registers a secret store for a cloud tenant from the request body
     * (awsId/externalId/role fields). The method body continues below this chunk.
     */
    private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        var data = toSlime(request.getData()).get();
        var awsId = mandatory("awsId", data).asString();
        var externalId = mandatory("externalId", data).asString();
        var role = mandatory("role",
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });

        // Re-read so the response reflects the stored state.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }

    /** Sets the AWS IAM role that is granted read access to this tenant's archive (cloud tenants only). */
    private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        var data = toSlime(request.getData()).get();
        var role = mandatory("role", data).asString();
        if (role.isBlank()) {
            return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
        }
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
    }

    /** Clears the AWS archive access role for the tenant (cloud tenants only). */
    private HttpResponse removeAwsArchiveAccess(String tenantName) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
    }

    /** Sets the GCP member granted archive access (cloud tenants only); continues on the next chunk line. */
    private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + 
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); } private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); } private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? 
"empty" : majorVersion));
            }

            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }

            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }

    /** Returns the application, or throws NotExistsException if it does not exist. */
    private Application getApplication(String tenantName, String applicationName) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        return controller.applications().getApplication(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Returns the instance, or throws NotExistsException if it does not exist. */
    private Instance getInstance(String tenantName, String applicationName, String instanceName) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        return controller.applications().getInstance(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Lists the node repository nodes of a deployment as a "nodes" JSON array. Continues on the next chunk line. */
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
            toSlime(node.resources(), nodeObject);
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
            nodeObject.setBool("down", node.down());
            // "retired" covers both already-retired and retirement-requested nodes.
            nodeObject.setBool("retired", node.retired() || node.wantToRetire());
            nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
            nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
            nodeObject.setString("group", node.group());
            nodeObject.setLong("index", node.index());
        }
        return new SlimeJsonResponse(slime);
    }

    /**
     * Reports the autoscaling state of each cluster of a deployment: resource limits, current,
     * target and suggested allocations, and recent scaling events. Continues on the next chunk line.
     */
    private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

        Slime slime = new Slime();
        Cursor clustersObject = slime.setObject().setObject("clusters");
        for (Cluster cluster : application.clusters().values()) {
            Cursor clusterObject = clustersObject.setObject(cluster.id().value());
            clusterObject.setString("type", cluster.type().name());
            toSlime(cluster.min(), clusterObject.setObject("min"));
            toSlime(cluster.max(), clusterObject.setObject("max"));
            if ( ! 
cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            toSlime(cluster.target(), clusterObject.setObject("target"));
            toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Maps a node state to its wire name; throws on states this API does not expose. */
    private static String valueOf(Node.State state) {
        return switch (state) {
            case failed        -> "failed";
            case parked        -> "parked";
            case dirty         -> "dirty";
            case ready         -> "ready";
            case active        -> "active";
            case inactive      -> "inactive";
            case reserved      -> "reserved";
            case provisioned   -> "provisioned";
            case breakfixed    -> "breakfixed";
            case deprovisioned -> "deprovisioned";
            default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        };
    }

    /** Maps an orchestration state to its wire name, falling back to "unknown". */
    static String valueOf(Node.ServiceState state) {
        return switch (state) {
            case expectedUp      -> "expectedUp";
            case allowedDown     -> "allowedDown";
            case permanentlyDown -> "permanentlyDown";
            case unorchestrated  -> "unorchestrated";
            default              -> "unknown";
        };
    }

    /** Maps a cluster type to its wire name; "unknown" is not exposed by this API. */
    private static String valueOf(Node.ClusterType type) {
        return switch (type) {
            case admin     -> "admin";
            case content   -> "content";
            case container -> "container";
            case combined  -> "combined";
            case unknown   -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
        };
    }

    /** Maps a disk speed to its wire name. */
    private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
        return switch (diskSpeed) {
            case fast -> "fast";
            case slow -> "slow";
            case any  -> "any";
        };
    }

    /** Maps a storage type to its wire name. */
    private static String valueOf(NodeResources.StorageType storageType) {
        return switch (storageType) {
            case remote -> "remote";
            case local  -> "local";
            case any    -> "any";
        };
    }

    /** Streams the deployment's logs back to the client. Continues on the next chunk line. */
    private HttpResponse logs(String tenantName, String 
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                // try-with-resources closes the config server stream even if the transfer fails.
                try (logStream) {
                    logStream.transferTo(outputStream);
                }
            }

            @Override
            public long maxPendingBytes() {
                return 1 << 26; // 64 MiB buffered before back-pressure
            }
        };
    }

    /** Returns the current support access state for a deployment. */
    private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
    }

    /** Grants support access to the deployment for 7 days, attributed to the requesting user. */
    private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Principal principal = requireUserPrincipal(request);
        Instant now = controller.clock().instant();
        SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
    }

    /**
     * Revokes support access and re-triggers deployment so any support credentials are rotated out.
     */
    private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deployment = new 
DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Principal principal = requireUserPrincipal(request);
        SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
        controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
    }

    /** Fetches per-search-node (proton) metrics for the deployment and wraps them as JSON. */
    private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment);
        return buildResponseFromSearchNodeMetrics(searchNodeMetrics);
    }

    /**
     * Lists scaling events per cluster within an optional [from, until] epoch-second window
     * (defaults: epoch .. now). Continues on the next chunk line.
     */
    private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        var from = Optional.ofNullable(request.getProperty("from"))
                .map(Long::valueOf)
                .map(Instant::ofEpochSecond)
                .orElse(Instant.EPOCH);
        var until = Optional.ofNullable(request.getProperty("until"))
                .map(Long::valueOf)
                .map(Instant::ofEpochSecond)
                .orElse(Instant.now(controller.clock()));
        var application = ApplicationId.from(tenantName, applicationName, instanceName);
        var zone = requireZone(environment, region);
        var deployment = new DeploymentId(application, zone);
        var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
        var slime = new Slime();
        var root = slime.setObject();
        for (var entry : events.entrySet()) {
            var serviceRoot = root.setArray(entry.getKey().clusterId().value());
scalingEventsToSlime(entry.getValue(), serviceRoot);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Wraps the collected search node metrics in a pretty-printed JSON "metrics" array; 500 on serialization failure. */
    private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
        try {
            var jsonObject = jsonMapper.createObjectNode();
            var jsonArray = jsonMapper.createArrayNode();
            for (SearchNodeMetrics metrics : searchnodeMetrics) {
                jsonArray.add(metrics.toJson());
            }
            jsonObject.set("metrics", jsonArray);
            return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
        } catch (JsonProcessingException e) {
            log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
            return new JsonResponse(500, "");
        }
    }

    /**
     * Triggers (or re-triggers) a job for the instance. Request flags: skipTests, reTrigger,
     * skipRevision, skipUpgrade. The response message spells out which upgrades were suppressed.
     */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        boolean requireTests = ! requestObject.field("skipTests").asBool();
        boolean reTrigger = requestObject.field("reTrigger").asBool();
        boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
        boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
        String triggered = reTrigger
                           ? controller.applications().deploymentTrigger()
                                       .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
                           : controller.applications().deploymentTrigger()
                                       .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                                       .stream().map(job -> job.type().jobName()).collect(joining(", "));

        // Builds e.g. ", without revision and platform upgrade" for the response message.
        String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                    (upgradeRevision ? "" : "revision") +
                                    ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                    (upgradePlatform ? "" : "platform") +
                                    ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
        return new MessageResponse(triggered.isEmpty() ? 
"Job " + type.jobName() + " for " + id + " not triggered"
                                                       : "Triggered " + triggered + " for " + id + suppressedUpgrades);
    }

    /** Pauses the given job for the maximum allowed pause period. */
    private HttpResponse pause(ApplicationId id, JobType type) {
        Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
        controller.applications().deploymentTrigger().pauseJob(id, type, until);
        return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
    }

    /** Resumes a paused job. */
    private HttpResponse resume(ApplicationId id, JobType type) {
        controller.applications().deploymentTrigger().resumeJob(id, type);
        return new MessageResponse(type.jobName() + " for " + id + " resumed");
    }

    /**
     * Re-sends the verification mail for a pending tenant contact/notification address.
     * Continues on the next chunk line.
     */
    private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
        var mail = mandatory("mail", inspector).asString();
        var type = mandatory("mailType", inspector).asString();

        var mailType = switch (type) {
            case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT;
            case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS;
            default -> throw new IllegalArgumentException("Unknown mail type " + type);
        };

        var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType);
        return pendingVerification.isPresent() ? 
new MessageResponse("Re-sent verification mail to " + mail)
                                               : ErrorResponse.notFoundError("No pending mail verification found for " + mail);
    }

    /** Serializes an application (all instances) to the application/v4 response format. */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("tenant", application.id().tenant().value());
        object.setString("application", application.id().application().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/job/", request.getUri()).toString());

        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
        application.projectId().ifPresent(id -> object.setLong("projectId", id));

        // Deploying/outstanding change is reported from the first instance only.
        application.instances().values().stream().findFirst().ifPresent(instance -> {
            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        });

        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

        Cursor instancesArray = object.setArray("instances");
        for (Instance instance : showOnlyProductionInstances(request) ? 
application.productionInstances().values()
                                                                      : application.instances().values())
            toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    /** Serializes a single instance (as part of an application response), including its deployments. */
    private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
        object.setString("instance", instance.name().value());
        if (deploymentSpec.instance(instance.name()).isPresent()) {
            // NOTE(review): jobStatus appears unused here — confirm whether it can be removed.
            Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), status.application());
            if ( ! 
status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());

            // Change blockers (block windows) from the deployment spec.
            Cursor changeBlockers = object.setArray("changeBlockers");
            deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }

        addRotationId(object, instance);

        // Deployments sorted per spec order when the instance is declared, insertion order otherwise.
        List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                     .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                     .orElse(List.copyOf(instance.deployments().values()));

        Cursor deploymentsArray = object.setArray("deployments");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = deploymentsArray.addObject();
            if (deployment.zone().environment() == Environment.prod && ! 
instance.rotations().isEmpty())
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);

            if (recurseOverDeployments(request)) // Include full deployment information
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                // Limited deployment info with a link to the full deployment resource.
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                addAvailabilityZone(deploymentObject, deployment.zone());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/instance/" + instance.name().value() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value(),
                                                           request.getUri()).toString());
            }
        }
    }

    /** Adds the first assigned rotation id, if any, as "rotationId". */
    private void addRotationId(Cursor object, Instance instance) {
        instance.rotations().stream()
                .map(AssignedRotation::rotationId)
                .findFirst()
                .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
    }

    /** Serializes a single instance as a top-level response (instance resource). */
    private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
        Application application = status.application();
        object.setString("tenant", instance.id().tenant().value());
        object.setString("application", instance.id().application().value());
        object.setString("instance", instance.id().instance().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + instance.id().tenant().value() +
                                                 "/application/" + instance.id().application().value() +
                                                 "/instance/" + instance.id().instance().value() + "/job/",
                                                 request.getUri()).toString());

        application.revisions().last().ifPresent(version -> {
            version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
            version.commit().ifPresent(commit -> object.setString("commit", commit));
        });

        application.projectId().ifPresent(id -> object.setLong("projectId", id));

        if (application.deploymentSpec().instance(instance.name()).isPresent()) {
            // NOTE(review): jobStatus appears unused here — confirm whether it can be removed.
            Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

            if ( ! 
instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

            // Change blockers (block windows) from the deployment spec.
            Cursor changeBlockers = object.setArray("changeBlockers");
            application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }

        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

        addRotationId(object, instance);

        List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                                  .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                  .orElse(List.copyOf(instance.deployments().values()));
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();

            if (deployment.zone().environment() == Environment.prod) {
                if (instance.rotations().size() == 1) {
                    toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject);
                }
                if ( ! recurseOverDeployments(request) && ! 
instance.rotations().isEmpty()) {
                    toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
                }
            }

            if (recurseOverDeployments(request)) // Include full deployment information
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                // Limited deployment info with a link to the full deployment resource.
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                deploymentObject.setString("instance", instance.id().instance().value());
                addAvailabilityZone(deploymentObject, deployment.zone());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value(),
                                                           request.getUri()).toString());
            }
        }

        // Zones this instance is expected to deploy to (or is deploying to manually) but has no
        // deployment in yet are listed with environment/region only.
        Stream.concat(status.jobSteps().keySet().stream()
                            .filter(job -> job.application().instance().equals(instance.name()))
                            .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                      controller.jobController().active(instance.id()).stream()
                                .map(run -> run.id().job())
                                .filter(job -> job.type().environment().isManuallyDeployed()))
              .map(job -> job.type().zone())
              .filter(zone -> ! 
instance.deployments().containsKey(zone))
              .forEach(zone -> {
                  Cursor deploymentObject = instancesArray.addObject();
                  deploymentObject.setString("environment", zone.environment().value());
                  deploymentObject.setString("region", zone.region().value());
              });

        // Both the legacy single "pemDeployKey" and the full "pemDeployKeys" array are emitted.
        application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    /** Returns a single deployment of an instance, or 404 if the instance or deployment is missing. */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().getInstance(id)
                                      .orElseThrow(() -> new NotExistsException(id + " not found"));

        DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Serializes a pending change: platform version and/or application revision. */
    private void toSlime(Cursor object, Change change, Application application) {
        change.platform().ifPresent(version -> object.setString("version", version.toString()));
        change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision)));
    }

    /** Serializes one endpoint: cluster, tls, url, scope, routing method and legacy flag. */
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
        object.setBool("legacy", endpoint.legacy());
    }

    /** Serializes the full deployment resource: endpoints, versions, status, quota, metrics etc. */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        addAvailabilityZone(response, deployment.zone());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

        // Unless explicitly requested, legacy and non-direct endpoints are filtered out.
        boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
        var endpointArray = response.setArray("endpoints");
        EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone);
        if (!legacyEndpoints) {
            zoneEndpoints = 
zoneEndpoints.not().legacy().direct();
        }
        for (var endpoint : zoneEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }
        EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                                   .targets(deploymentId);
        if (!legacyEndpoints) {
            declaredEndpoints = declaredEndpoints.not().legacy().direct();
        }
        for (var endpoint : declaredEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }

        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", application.revisions().get(deployment.revision()).stringId());
        response.setLong("build", deployment.revision().number());
        Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
        response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));

        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

        // Enclave info is present only when the deployment resolves to a tenant cloud account.
        controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
            Cursor enclave = response.setObject("enclave");
            enclave.setString("cloudAccount", cloudAccount.value());
            controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> 
enclave.setString("athensDomain", domain.value()));
        });

        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

            if (!deployment.zone().environment().isManuallyDeployed()) {
                // Pipeline deployment: status derived from the deployment job's step readiness.
                DeploymentStatus status = controller.jobController().deploymentStatus(application);
                JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
                Optional.ofNullable(status.jobSteps().get(jobId))
                        .ifPresent(stepStatus -> {
                            JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                            if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                                response.setString("status", "complete");
                            else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant()))
                                response.setString("status", "pending");
                            else
                                response.setString("status", "running");
                        });
            } else {
                // Manual deployment: status from the last run of the deployment job.
                var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
                deploymentRun.ifPresent(run -> {
                    response.setString("status", run.hasEnded() ? "complete" : "running");
                });
            }
        }

        response.setDouble("quota", deployment.quota().rate());
        deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));

        // Enclave deployments archive per cloud account; others per tenant.
        (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ? 
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false)
                                                                        : controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
                .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }

    /** Serializes a rotation state as a "bcpStatus" object. */
    private void toSlime(RotationState state, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        bcpStatus.setString("rotationStatus", rotationStateString(state));
    }

    /** Serializes per-rotation endpoint status for a deployment. Continues on the next chunk line. */
    private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
        var array = object.setArray("endpointStatus");
        for (var rotation : rotations) {
            var statusObject = array.addObject();
            var targets = status.of(rotation.rotationId());
            statusObject.setString("endpointId", rotation.endpointId().id());
            statusObject.setString("rotationId", rotation.rotationId().asString());
            statusObject.setString("clusterId", rotation.clusterId().value());
            // Status of this rotation for this particular deployment (zone-scoped).
            statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
            statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
        }
    }

    /** Returns the monitoring system (Yamas) URI for the given deployment. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }

    /**
     * Sets the routing status of a deployment in or out of global rotation.
     * Throws NotExistsException when the instance has no deployment in the requested zone.
     * The acting agent is recorded as operator when the request principal is an operator.
     */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        ZoneId zone = requireZone(environment, region);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
        RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
        RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
        controller.routing().of(deploymentId).setRoutingStatus(status, agent);
        return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                               instance.id().toShortString(), zone, inService ? "in" : "out of"));
    }

    /**
     * Maps the cloud of the deployment's zone to the private-service type name used in responses.
     * NOTE(review): the unchecked Optional.get() assumes the zone is always registered — confirm.
     */
    private String serviceTypeIn(DeploymentId id) {
        CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName();
        if (CloudName.AWS.equals(cloud)) return "aws-private-link";
        if (CloudName.GCP.equals(cloud)) return "gcp-service-connect";
        return "unknown";
    }

    /** Lists private (VPC endpoint) service info for each load balancer of the given deployment. */
    private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));
        List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId());
        Slime slime = new Slime();
        Cursor lbArray = slime.setObject().setArray("privateServices");
        for (LoadBalancer lb : lbs) {
            Cursor serviceObject = lbArray.addObject();
            serviceObject.setString("cluster", lb.cluster().value());
            // Only load balancers that expose a private service are serialized in full.
            lb.service().ifPresent(service -> {
                serviceObject.setString("serviceId", service.id());
                serviceObject.setString("type", serviceTypeIn(id));
                Cursor urnsArray = serviceObject.setArray("allowedUrns");
                for (AllowedUrn urn : service.allowedUrns()) {
                    Cursor urnObject = urnsArray.addObject();
                    urnObject.setString("type", switch (urn.type()) {
                        case awsPrivateLink -> "aws-private-link";
                        case gcpServiceConnect -> "gcp-service-connect";
                    });
                    urnObject.setString("urn", urn.urn());
                }
                // Current endpoint connections as reported by the VPC endpoint service.
                Cursor endpointsArray = serviceObject.setArray("endpoints");
                controller.serviceRegistry().vpcEndpointService()
                          .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount())
                          .forEach(endpoint -> {
                              Cursor endpointObject = endpointsArray.addObject();
                              endpointObject.setString("endpointId", endpoint.endpointId());
                              endpointObject.setString("state", endpoint.stateValue().name());
                              endpointObject.setString("detail", endpoint.stateString());
                          });
            });
        }
        return new SlimeJsonResponse(slime);
    }

    /** Triggers dropping of documents for a manually deployed application, optionally limited to one cluster. */
    private HttpResponse dropDocuments(String tenant, String application, String instance, String
                                       environment, String region, Optional<ClusterSpec.Id> clusterId) {
        ZoneId zone = ZoneId.from(environment, region);
        // Guard: this operation is restricted to manually deployed environments (dev/perf).
        if (!zone.environment().isManuallyDeployed())
            throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");
        ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
        controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId);
        return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() +
                clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone);
    }

    /** Returns the current global rotation override status for the deployment's primary rotation endpoint. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        Slime slime = new Slime();
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                       .requiresRotation()
                                                       .primary();
        if (primaryEndpoint.isPresent()) {
            DeploymentRoutingContext context = controller.routing().of(deploymentId);
            RoutingStatus status = context.routingStatus();
            array.addString(primaryEndpoint.get().upstreamName(deploymentId));
            Cursor statusObject = array.addObject();
            statusObject.setString("status", status.value().name());
            statusObject.setString("reason", ""); // reason is not tracked; always empty
            statusObject.setString("agent", status.agent().name());
            statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the rotation status of the given (or only) endpoint for a deployment in a zone. */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().requireInstance(applicationId);
        ZoneId zone = requireZone(environment, region);
        RotationId rotation = findRotationId(instance, endpointId);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(instance.rotationStatus().of(rotation, deployment), response);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the change (platform and/or application version) currently rolling out, with pin flags. */
    private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        if ( ! instance.change().isEmpty()) {
            instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
            instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
            // "pinned" is kept alongside "platform-pinned" — presumably for backwards
            // compatibility with older clients; verify before removing either.
            root.setBool("pinned", instance.change().isPlatformPinned());
            root.setBool("platform-pinned", instance.change().isPlatformPinned());
            root.setBool("application-pinned", instance.change().isRevisionPinned());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns whether orchestration of the given deployment is currently suspended. */
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        boolean suspended = controller.applications().isSuspended(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setBool("suspended", suspended);
        return new SlimeJsonResponse(slime);
    }

    /** Proxies a service's /status page from the config server for a node in this deployment. */
    private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath,
                                HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId,
                                                                              serviceName,
                                                                              DomainName.of(host),
                                                                              HttpURL.Path.parse("/status").append(restPath),
                                                                              Query.empty().add(request.getJDiscRequest().parameters()));
    }

    /** Returns the orchestrator view of service nodes for the given deployment. */
    private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
    }

    /** Proxies a service's /state/v1 API from the config server, forwarding the original URL. */
    private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        Query query = Query.empty().add(request.getJDiscRequest().parameters());
        // Pass the caller's URL (sans query) so the backend can rewrite links correctly.
        query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
        return controller.serviceRegistry().configServer().getServiceNodePage(
                deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
    }

    /** Returns content of the deployed application package at the given path. */
    private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
    }

    /** Updates tenant metadata from the request body; requires the tenant to already exist. */
    private HttpResponse updateTenant(String tenantName, HttpRequest request) {
        getTenantOrThrow(tenantName); // fail fast with 404 semantics before attempting update
        TenantName tenant = TenantName.from(tenantName);
        Inspector requestObject = toSlime(request.getData()).get();
        controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                    accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
        return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
    }

    /** Creates a tenant; in public systems the creating user is recorded as tenant contact. */
    private HttpResponse createTenant(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Inspector requestObject = toSlime(request.getData()).get();
        controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                    accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
        if (controller.system().isPublic()) {
            User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
            TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                        .info()
                                        .withContact(TenantContact.from(user.name(), new Email(user.email(), true)));
            // Store changes related to security under lock to avoid lost updates.
            controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
                lockedTenant = lockedTenant.withInfo(info);
                controller.tenants().store(lockedTenant);
            });
        }
        return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
    }

    /** Creates an application (without instances) under the given tenant. */
    private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
        Application application = controller.applications().createApplication(id, credentials);
        Slime slime = new Slime();
        toSlime(id, slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Creates an instance, implicitly creating the application first when it does not exist. */
    private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId
        applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        if (controller.applications().getApplication(applicationId).isEmpty())
            createApplication(tenantName, applicationName, request);
        controller.applications().createInstance(applicationId.instance(instanceName));
        Slime slime = new Slime();
        toSlime(applicationId.instance(instanceName), slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // An empty version means "deploy the current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            // Only operators may force a version that is not active in this system.
            if ( ! versionStatus.isActive(version) && ! isOperator(request))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPlatformPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Trigger deployment to the last known application package for the given application, or a specific build when given. */
    private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Inspector buildField = toSlime(request.getData()).get().field("build");
        long build = buildField.valid() ? buildField.asLong() : -1; // -1 means "use the latest revision"
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                              : getRevision(application.get(), build);
            Change change = Change.of(revision);
            if (pin)
                change = change.withRevisionPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /**
     * Resolves the revision for a given build number.
     * Throws IllegalArgumentException when the build is unknown or its package is no longer stored.
     */
    private RevisionId getRevision(Application application, long build) {
        return application.revisions().withPackage().stream()
                          .map(ApplicationVersion::id)
                          .filter(version -> version.number() == build)
                          .findFirst()
                          .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                                  application.id().application(),
                                                                                                  build))
                          .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
    }

    /** Marks a production build as skipped (non-deployable) and cancels any in-flight change to it. */
    private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
        controller.applications().lockApplicationOrThrow(id, application -> {
            controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
            for (Instance instance : application.get().instances().values())
                if (instance.change().revision().equals(Optional.of(revision)))
                    controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
        });
        return new MessageResponse("Marked build '" + build + "' as non-deployable");
    }

    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
    private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Change change = application.get().require(id.instance()).change();
            if (change.isEmpty()) {
                response.append("No deployment in progress for ").append(id).append(" at this time");
                return;
            }
            // The URL-path choice ("all", "platform", ...) maps to the ChangesToCancel enum.
            ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase());
            controller.applications().deploymentTrigger().cancelChange(id, cancel);
            response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
    private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        // "clusterId" and "documentType" are optional comma-separated filters; blanks are ignored.
        List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                            .flatMap(clusters -> Stream.of(clusters.split(",")))
                                            .filter(cluster -> ! cluster.isBlank())
                                            .toList();
        List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                             .flatMap(types -> Stream.of(types.split(",")))
                                             .filter(type -> ! type.isBlank())
                                             .toList();
        Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
        boolean indexedOnly = request.getBooleanProperty("indexedOnly");
        controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed,
                                          "reindexing triggered by " + requireUserPrincipal(request).getName());
        return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                                   (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                                   (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                                   (indexedOnly ? ", for indexed types" : "") +
                                   (speed != null ? ", with speed " + speed : ""));
    }

    /** Gets reindexing status of an application in a zone. */
    private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setBool("enabled", reindexing.enabled());
        // Clusters, and their pending/ready document types, sorted by key for stable output.
        Cursor clustersArray = root.setArray("clusters");
        reindexing.clusters().entrySet().stream().sorted(comparingByKey())
                  .forEach(cluster -> {
                      Cursor clusterObject = clustersArray.addObject();
                      clusterObject.setString("name", cluster.getKey());
                      Cursor pendingArray = clusterObject.setArray("pending");
                      cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                             .forEach(pending -> {
                                 Cursor pendingObject = pendingArray.addObject();
                                 pendingObject.setString("type", pending.getKey());
                                 pendingObject.setLong("requiredGeneration", pending.getValue());
                             });
                      Cursor readyArray = clusterObject.setArray("ready");
                      cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                             .forEach(ready -> {
                                 Cursor readyObject =
                                         readyArray.addObject();
                                 readyObject.setString("type", ready.getKey());
                                 setStatus(readyObject, ready.getValue());
                             });
                  });
        return new SlimeJsonResponse(slime);
    }

    /** Serializes one reindexing status entry; every field is optional and written only when present. */
    void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
        status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
        status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
        status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
        status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
        status.message().ifPresent(message -> statusObject.setString("message", message));
        status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
        status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
        status.cause().ifPresent(cause -> statusObject.setString("cause", cause));
    }

    /** Maps a reindexing state to its lower-case wire-format name. */
    private static String toString(ApplicationReindexing.State state) {
        return switch (state) {
            case PENDING: yield "pending";
            case RUNNING: yield "running";
            case FAILED: yield "failed";
            case SUCCESSFUL: yield "successful";
        };
    }

    /** Enables reindexing of an application in a zone. */
    private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().enableReindexing(id, zone);
        return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
    }

    /** Disables reindexing of an application in a zone. */
    private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().disableReindexing(id, zone);
        return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        // Optional filters: hostname, clusterType, clusterId — all may be absent.
        RestartFilter restartFilter = new RestartFilter()
                .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
                .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
                .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        controller.applications().restart(deploymentId, restartFilter);
        return new MessageResponse("Requested restart of " + deploymentId);
    }

    /** Set suspension status of the given deployment. */
    private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        controller.applications().setSuspension(deploymentId, suspend);
        return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
    }

    /** Deploys an application package directly to a job's zone; restricted to manual environments unless operator. */
    private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
        if ( ! type.environment().isManuallyDeployed() && !
             isOperator(request))
            throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

        Map<String, byte[]> dataParts = parseDataParts(request);
        // NOTE(review): the key checked here is the literal "applicationZip" while the value is read
        // via EnvironmentResource.APPLICATION_ZIP — presumably the same string; confirm they match.
        if ( ! dataParts.containsKey("applicationZip"))
            throw new IllegalArgumentException("Missing required form part 'applicationZip'");

        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
        controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                         Optional.of(id.instance()),
                                                                         Optional.of(type.zone()),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));

        // Optional deploy options (JSON): "vespaVersion" and "dryRun".
        Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                            .map(json -> SlimeUtils.jsonToSlime(json).get())
                                            .flatMap(options -> optional("vespaVersion", options))
                                            .map(Version::fromString);

        ensureApplicationExists(TenantAndApplicationId.from(id), request);

        boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions"))
                                 .map(json -> SlimeUtils.jsonToSlime(json).get())
                                 .flatMap(options -> optional("dryRun", options))
                                 .map(Boolean::valueOf)
                                 .orElse(false);

        controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
        RunId runId = controller.jobController().last(id, type).get().id();
        Slime slime = new Slime();
        Cursor rootObject = slime.setObject();
        rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time.");
        rootObject.setLong("run", runId.number());
        return new SlimeJsonResponse(slime);
    }

    /** Deploys a system application's package to a zone at the current system version. */
    private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);

        Map<String, byte[]> dataParts = parseDataParts(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

        // Only system applications that carry an application package may be deployed this way.
        Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
        if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) {
            return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
        }

        // Callers may not choose a version; the system version is always used.
        String vespaVersion = deployOptions.field("vespaVersion").asString();
        if ( ! vespaVersion.isEmpty()) {
            return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
        }

        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        DeploymentResult result = controller.applications()
                                            .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber());
        Cursor logArray = root.setArray("prepareMessages");
        for (LogEntry logMessage : result.log()) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.epochMillis());
            logObject.setString("level", logMessage.level().getName());
            logObject.setString("message", logMessage.message());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Deletes a tenant; the "forget" flag (operators only) also removes historical data. */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        boolean forget = request.getBooleanProperty("forget");
        if (forget && !
            isOperator(request))
            return ErrorResponse.forbidden("Only operators can forget a tenant");

        controller.tenants().delete(TenantName.from(tenantName),
                                    Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest())),
                                    forget);

        return new MessageResponse("Deleted tenant " + tenantName);
    }

    /** Deletes an application and all its instances. */
    private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
        return new MessageResponse("Deleted application " + id);
    }

    /** Deletes an instance; the application itself is deleted when its last instance is removed. */
    private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        controller.applications().deleteInstance(id.instance(instanceName));
        if (controller.applications().requireApplication(id).instances().isEmpty()) {
            Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(),
                                                                        request.getJDiscRequest());
            controller.applications().deleteApplication(id, credentials);
        }
        return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
    }

    /** Deactivates a deployment and aborts any still-running deployment job for it. */
    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                           requireZone(environment, region));
        // Attempt to deactivate while holding the application lock.
        controller.applications().deactivate(id.applicationId(), id.zoneId());
        controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
                  .filter(run -> ! run.hasEnded())
                  .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
        return new MessageResponse("Deactivated " + id);
    }

    /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
    private HttpResponse testConfig(ApplicationId id, JobType type) {
        Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
        // Fall back to the default instance when the given instance is not declared in the spec.
        ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                       ? id : TenantAndApplicationId.from(id).defaultInstance();
        HashSet<DeploymentId> deployments = controller.applications()
                                                      .getInstance(prodInstanceId).stream()
                                                      .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                      .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                      .collect(Collectors.toCollection(HashSet::new));
        ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
        if ( !
             type.isProduction())
            deployments.add(new DeploymentId(toTest, type.zone()));

        Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
        if (deployment == null)
            throw new NotExistsException(toTest + " is not deployed in " + type.zone());

        return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                      type,
                                                                      false,
                                                                      deployment.version(),
                                                                      deployment.revision(),
                                                                      deployment.at(),
                                                                      controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                      controller.applications().reachableContentClustersByZone(deployments)));
    }

    /**
     * Requests a service dump on the given node by writing a "serviceDump" report to the node repository.
     * Rejects a new request while a previous one is still in progress unless "force" is set.
     * When "wait" is set, blocks until the dump completes or fails.
     */
    private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment,
                                            String region, String hostname, HttpRequest request) {
        NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
        ZoneId zone = requireZone(environment, region);

        // A report with neither failedAt nor completedAt set means a dump is still in progress.
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
        if (report != null) {
            Cursor cursor = report.get();
            boolean force = request.getBooleanProperty("force");
            if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
                throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
            }
        }

        Slime requestPayload;
        try {
            requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
        } catch (Exception e) {
            throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
        }
        Cursor requestPayloadCursor = requestPayload.get();
        String configId = requestPayloadCursor.field("configId").asString();
        long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
        if (configId.isEmpty()) {
            throw new IllegalArgumentException("Missing configId");
        }
        Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
        int artifactEntries = artifactsCursor.entries();
        if (artifactEntries == 0) {
            throw new IllegalArgumentException("Missing or empty 'artifacts'");
        }

        // Build the dump request document stored under the node's "serviceDump" report.
        Slime dumpRequest = new Slime();
        Cursor dumpRequestCursor = dumpRequest.setObject();
        dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
        dumpRequestCursor.setString("configId", configId);
        Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
        for (int i = 0; i < artifactEntries; i++) {
            dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
        }
        if (expiresAt > 0) {
            dumpRequestCursor.setLong("expiresAt", expiresAt);
        }
        Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
        if (dumpOptionsCursor.children() > 0) {
            SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
        }
        // NOTE(review): new String(byte[]) uses the platform default charset; presumably the JSON
        // bytes are ASCII/UTF-8-safe here — confirm, or pass StandardCharsets.UTF_8 explicitly.
        var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
        nodeRepository.updateReports(zone, hostname, reportsUpdate);
        boolean wait = request.getBooleanProperty("wait");
        if (!wait) return new MessageResponse("Request created");
        return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
    }

    /** Returns the current service dump report for a node, or 404 when none exists. */
    private HttpResponse getServiceDump(String tenant, String application, String instance, String environment,
                                        String region, String hostname, HttpRequest request) {
        NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
        ZoneId zone = requireZone(environment, region);
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
                .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
        return new SlimeJsonResponse(report);
    }

    /** Polls the node's dump report until it is marked completed or failed, then returns it. */
    private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                                  String application, String instance, String hostname) {
        int pollInterval = 2; // seconds between polls — presumably; confirm against the sleep below
        Slime report;
        while (true) {
            report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
            Cursor cursor = report.get();
            if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
                break;
            }
            final
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
This seems redundant, as the name does not add any value(?)
void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() || currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.currentRestartGeneration(); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); lastNode = node; } switch (node.state()) { case ready: case reserved: case failed: case inactive: case parked: storageMaintainer.syncLogs(context, true); removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context, Optional.empty()); stopServicesIfNeeded(context); break; case active: storageMaintainer.syncLogs(context, true); storageMaintainer.cleanDiskIfFull(context); storageMaintainer.handleCoreDumpsForContainer(context, container, false); var runOrdinaryWireguardTasks = true; if (container.isPresent() && container.get().state().isRunning()) { Optional<Container> finalContainer = container; wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id())); runOrdinaryWireguardTasks = false; } if (downloadImageIfNeeded(context, container)) { context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString()); return; } dropDocsIfNeeded(context, container); container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainers.forEach(maintainer -> maintainer.converge(context)); if (container.isEmpty()) { containerState = STARTING; container = Optional.of(startContainer(context)); containerState = UNKNOWN; runOrdinaryWireguardTasks = true; } else { container = Optional.of(updateContainerIfNeeded(context, container.get())); } aclMaintainer.ifPresent(maintainer -> 
maintainer.converge(context)); if (runOrdinaryWireguardTasks) { Optional<Container> finalContainer = container; wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id())); } startServicesIfNeeded(context); resumeNodeIfNeeded(context); if (healthChecker.isPresent()) { healthChecker.get().verifyHealth(context); if (firstSuccessfulHealthCheckInstant.isEmpty()) firstSuccessfulHealthCheckInstant = Optional.of(clock.instant()); Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context))); if (!container.get().resources().equalsCpu(getContainerResources(context))) throw ConvergenceException.ofTransient("Refusing to resume until warm up period ends (" + (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")"); } serviceDumper.processServiceDumpRequest(context); updateNodeRepoWithCurrentAttributes(context, container.map(Container::createdAt)); if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) { context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); suspendedInOrchestrator = false; } break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready"); credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.syncLogs(context, false); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context, Optional.empty()); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw ConvergenceException.ofError("UNKNOWN STATE " + node.state().name()); } }
Optional<Container> finalContainer = container;
void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() || currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.currentRestartGeneration(); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); lastNode = node; } switch (node.state()) { case ready: case reserved: case failed: case inactive: case parked: storageMaintainer.syncLogs(context, true); removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context, Optional.empty()); stopServicesIfNeeded(context); break; case active: storageMaintainer.syncLogs(context, true); storageMaintainer.cleanDiskIfFull(context); storageMaintainer.handleCoreDumpsForContainer(context, container, false); var runOrdinaryWireguardTasks = true; if (container.isPresent() && container.get().state().isRunning()) { Optional<Container> finalContainer = container; wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id())); runOrdinaryWireguardTasks = false; } if (downloadImageIfNeeded(context, container)) { context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString()); return; } dropDocsIfNeeded(context, container); container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainers.forEach(maintainer -> maintainer.converge(context)); if (container.isEmpty()) { containerState = STARTING; container = Optional.of(startContainer(context)); containerState = UNKNOWN; runOrdinaryWireguardTasks = true; } else { container = Optional.of(updateContainerIfNeeded(context, container.get())); } aclMaintainer.ifPresent(maintainer -> 
maintainer.converge(context)); if (runOrdinaryWireguardTasks) { Optional<Container> finalContainer = container; wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id())); } startServicesIfNeeded(context); resumeNodeIfNeeded(context); if (healthChecker.isPresent()) { healthChecker.get().verifyHealth(context); if (firstSuccessfulHealthCheckInstant.isEmpty()) firstSuccessfulHealthCheckInstant = Optional.of(clock.instant()); Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context))); if (!container.get().resources().equalsCpu(getContainerResources(context))) throw ConvergenceException.ofTransient("Refusing to resume until warm up period ends (" + (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")"); } serviceDumper.processServiceDumpRequest(context); updateNodeRepoWithCurrentAttributes(context, container.map(Container::createdAt)); if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) { context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); suspendedInOrchestrator = false; } break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready"); credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.syncLogs(context, false); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context, Optional.empty()); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw ConvergenceException.ofError("UNKNOWN STATE " + node.state().name()); } }
/**
 * Node agent: drives a single node toward its wanted state by repeatedly converging on the
 * node spec fetched from the node repository. Runs its own tick thread (see start()); most
 * mutable fields below are only touched from that thread — NOTE(review): thread-confinement
 * is inferred from the visible call sites here, confirm against the full file.
 */
class NodeAgentImpl implements NodeAgent {

    // Warm-up period granted to freshly (re)started services before CPU is capped.
    // One second is subtracted so the period ends just before a 90s boundary —
    // NOTE(review): the reason for the -1s is not visible here, confirm upstream.
    private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1));

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // Collaborators, all injected via the constructor.
    private final NodeAgentContextSupplier contextSupplier;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final ContainerOperations containerOperations;
    private final RegistryCredentialsProvider registryCredentialsProvider;
    private final StorageMaintainer storageMaintainer;
    private final List<CredentialsMaintainer> credentialsMaintainers;
    private final Optional<AclMaintainer> aclMaintainer;
    private final Optional<HealthChecker> healthChecker;
    private final Clock clock;
    private final Duration warmUpDuration;
    private final DoubleFlag containerCpuCap;
    private final VespaServiceDumper serviceDumper;
    private final List<ContainerWireguardTask> wireguardTasks;

    private Thread loopThread;                         // tick thread, created in start()
    private ContainerState containerState = UNKNOWN;   // see ContainerState javadoc below
    private NodeSpec lastNode;                         // spec from previous tick, for change logging

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Per-container bookkeeping, reset when a container is (re)started.
    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;
    private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty();
    private boolean suspendedInOrchestrator = false;

    private int numberOfUnhandledException = 0;

    // Generations this agent has acted on; compared against the node spec's wanted/current values.
    private long currentRebootGeneration = 0;
    private Optional<Long> currentRestartGeneration = Optional.empty();

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    /** Convenience constructor using the default warm-up duration. */
    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer,
                         FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock,
                         VespaServiceDumper serviceDumper, List<ContainerWireguardTask> wireguardTasks) {
        this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider,
             storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock,
             DEFAULT_WARM_UP_DURATION, serviceDumper, wireguardTasks);
    }

    /** Full constructor; warmUpDuration is overridable for tests and special deployments. */
    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer,
                         FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock,
                         Duration warmUpDuration, VespaServiceDumper serviceDumper,
                         List<ContainerWireguardTask> wireguardTasks) {
        this.contextSupplier = contextSupplier;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.containerOperations = containerOperations;
        this.registryCredentialsProvider = registryCredentialsProvider;
        this.storageMaintainer = storageMaintainer;
        this.credentialsMaintainers = credentialsMaintainers;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        this.clock = clock;
        this.warmUpDuration = warmUpDuration;
        this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource);
        this.serviceDumper = serviceDumper;
        this.wireguardTasks = new ArrayList<>(wireguardTasks); // defensive copy
    }

    /**
     * Starts the tick loop on a dedicated thread; each iteration converges on the next
     * context from the supplier. May only be called once.
     */
    @Override
    public void start(NodeAgentContext initialContext) {
        if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent.");

        loopThread = new Thread(() -> {
            while (!terminated.get()) {
                try {
                    converge(contextSupplier.nextContext());
                } catch (ContextSupplierInterruptedException ignored) {
                    // Interrupt is the supplier's shutdown signal; loop re-checks terminated.
                }
            }
        });
        loopThread.setName("tick-" + initialContext.hostname());
        loopThread.start();
    }

    /**
     * Stops the tick loop and blocks until the thread has exited. May only be called once.
     * The join is retried in a loop because an InterruptedException must not let us return
     * while the tick thread is still alive.
     */
    @Override
    public void stopForRemoval(NodeAgentContext context) {
        if (!terminated.compareAndSet(false, true))
            throw new IllegalStateException("Can not re-stop a node agent.");

        contextSupplier.interrupt();

        do {
            try {
                loopThread.join();
            } catch (InterruptedException ignored) { }
        } while (loopThread.isAlive());

        context.log(logger, "Stopped");
    }

    /** Starts Vespa services inside the container unless they are already flagged as started. */
    void startServicesIfNeeded(NodeAgentContext context) {
        if (!hasStartedServices) {
            context.log(logger, "Invoking vespa-nodectl to start services");
            String output = containerOperations.startServices(context);
            if (!output.isBlank()) {
                context.log(logger, "Start services output: " + output);
            }
            hasStartedServices = true;
        }
    }

    /** Resumes the node's services unless they are already flagged as resumed. */
    void resumeNodeIfNeeded(NodeAgentContext context) {
        if (!hasResumedNode) {
            context.log(logger, "Invoking vespa-nodectl to resume services");
            String output = containerOperations.resumeNode(context);
            if (!output.isBlank()) {
                context.log(logger, "Resume services output: " + output);
            }
            hasResumedNode = true;
        }
    }

    /**
     * Publishes this agent's view of the node (restart/reboot generation, docker image,
     * Vespa version, drop-documents report) to the node repository, but only when something
     * actually differs from what the node repo already has.
     */
    private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context, Optional<Instant> containerCreatedAt) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();
        boolean changed = false;

        if (context.node().wantedRestartGeneration().isPresent() &&
                !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) {
            currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration());
            newNodeAttributes.withRestartGeneration(currentRestartGeneration);
            changed = true;
        }

        // True when the container was created after the most recent "rebooted" event
        // (or when there is a container but no such event at all).
        boolean createdAtAfterRebootedEvent = context.node().events().stream()
                .filter(event -> event.type().equals("rebooted"))
                .map(event -> containerCreatedAt
                        .map(createdAt -> createdAt.isAfter(event.at()))
                        .orElse(false))
                .findFirst()
                .orElse(containerCreatedAt.isPresent());
        if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration) || createdAtAfterRebootedEvent) {
            currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration());
            newNodeAttributes.withRebootGeneration(currentRebootGeneration);
            changed = true;
        }

        // Only report the wanted image as current while containerState is UNKNOWN
        // (i.e. not while a start attempt is in flight or the container is absent).
        Optional<DockerImage> wantedDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(context.node().currentDockerImage(), wantedDockerImage)) {
            DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY);
            DockerImage newImage = wantedDockerImage.orElse(DockerImage.EMPTY);

            currentNodeAttributes.withDockerImage(currentImage);
            currentNodeAttributes.withVespaVersion(context.node().currentVespaVersion().orElse(Version.emptyVersion));
            newNodeAttributes.withDockerImage(newImage);
            newNodeAttributes.withVespaVersion(context.node().wantedVespaVersion().orElse(Version.emptyVersion));
            changed = true;
        }

        // Stamp startedAt on the drop-documents report once the drop has been readied.
        Optional<DropDocumentsReport> report = context.node().reports().getReport(DropDocumentsReport.reportId(), DropDocumentsReport.class);
        if (report.isPresent() && report.get().startedAt() == null && report.get().readiedAt() != null) {
            newNodeAttributes.withReport(DropDocumentsReport.reportId(), report.get().withStartedAt(clock.millis()).toJsonNode());
            changed = true;
        }

        if (changed) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                        currentNodeAttributes, newNodeAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newNodeAttributes);
        }
    }

    /**
     * Creates and starts the container. During warm-up the container gets unlimited CPUs;
     * updateContainerIfNeeded() later applies the real CPU cap. Resets per-container state
     * and adopts the wanted reboot/restart generations.
     */
    private Container startContainer(NodeAgentContext context) {
        ContainerResources wantedResources = warmUpDuration(context).isNegative() ?
                getContainerResources(context) : getContainerResources(context).withUnlimitedCpus();
        ContainerData containerData = containerOperations.createContainer(context, wantedResources);
        writeContainerData(context, containerData);
        containerOperations.startContainer(context);

        currentRebootGeneration = context.node().wantedRebootGeneration();
        currentRestartGeneration = context.node().wantedRestartGeneration();
        hasStartedServices = true;  // Assume services are started if we start the container
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
        return containerOperations.getContainer(context).orElseThrow(() ->
                ConvergenceException.ofError("Did not find container that was just started"));
    }

    /**
     * Removes the container if any removal reason applies (returning empty), otherwise
     * restarts services in-place when the restart generation has been bumped.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeAgentContext context, Optional<Container> existingContainer) {
        if (existingContainer.isPresent()) {
            List<String> reasons = shouldRemoveContainer(context, existingContainer.get());
            if (!reasons.isEmpty()) {
                removeContainer(context, existingContainer.get(), reasons, false);
                return Optional.empty();
            }

            shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> {
                context.log(logger, "Invoking vespa-nodectl to restart services: " + restartReason);
                orchestratorSuspendNode(context);

                // Lift the CPU cap for the warm-up period following the restart.
                ContainerResources currentResources = existingContainer.get().resources();
                ContainerResources wantedResources = currentResources.withUnlimitedCpus();
                if ( ! warmUpDuration(context).isNegative() && ! wantedResources.equals(currentResources)) {
                    context.log(logger, "Updating container resources: %s -> %s",
                                existingContainer.get().resources().toStringCpu(), wantedResources.toStringCpu());
                    containerOperations.updateContainer(context, existingContainer.get().id(), wantedResources);
                }

                String output = containerOperations.restartVespa(context);
                if ( ! output.isBlank()) {
                    context.log(logger, "Restart services output: " + output);
                }
                currentRestartGeneration = context.node().wantedRestartGeneration();
                firstSuccessfulHealthCheckInstant = Optional.empty(); // restart voids previous health history
            });
        }

        return existingContainer;
    }

    /**
     * Returns a human-readable reason to restart services, or empty if no restart is needed.
     * NOTE(review): both Optionals are unwrapped with get() without a presence check here;
     * this relies on doConverge() having synced currentRestartGeneration with the node spec,
     * and on wantedRestartGeneration being present whenever currentRestartGeneration is —
    *  confirm that invariant holds.
     */
    private Optional<String> shouldRestartServices(NodeAgentContext context, Container existingContainer) {
        NodeSpec node = context.node();
        if (!existingContainer.state().isRunning() || node.state() != NodeState.active) return Optional.empty();

        if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Stops services when they are running but the node no longer has an owner. */
    private void stopServicesIfNeeded(NodeAgentContext context) {
        if (hasStartedServices && context.node().owner().isEmpty())
            stopServices(context);
    }

    /** Stops services in the container (no-op if the container is known to be absent). */
    private void stopServices(NodeAgentContext context) {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        hasStartedServices = hasResumedNode = false;
        firstSuccessfulHealthCheckInstant = Optional.empty();
        containerOperations.stopServices(context);
    }

    /** Removes the container as part of host suspension; the host is assumed already suspended. */
    @Override
    public void stopForHostSuspension(NodeAgentContext context) {
        getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true));
    }

    /**
     * Suspends services in the container. Failures are logged and swallowed — suspend is
     * best-effort here.
     */
    public void suspend(NodeAgentContext context) {
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            context.log(logger, "Invoking vespa-nodectl to suspend services");
            String output = containerOperations.suspendNode(context);
            if (!output.isBlank()) {
                context.log(logger, "Suspend services output: " + output);
            }
        } catch (RuntimeException e) {
            // Failure to suspend is not fatal, otherwise we may deadlock the removal of the container
            context.log(logger, Level.WARNING, "Failed trying to suspend container", e);
        }
    }

    /**
     * Returns the (possibly empty) list of reasons the existing container must be removed:
     * terminal node state, image change, stopped container, reboot wanted, memory change,
     * or a failed start attempt.
     */
    private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) {
        final NodeState nodeState = context.node().state();
        List<String> reasons = new ArrayList<>();
        if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned)
            reasons.add("Node in state " + nodeState + ", container should no longer be running");

        if (context.node().wantedDockerImage().isPresent() &&
                !context.node().wantedDockerImage().get().equals(existingContainer.image())) {
            reasons.add("The node is supposed to run a new Docker image: "
                    + existingContainer.image().asString() + " -> " + context.node().wantedDockerImage().get().asString());
        }

        if (!existingContainer.state().isRunning())
            reasons.add("Container no longer running");

        if (currentRebootGeneration < context.node().wantedRebootGeneration()) {
            reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    currentRebootGeneration, context.node().wantedRebootGeneration()));
        }

        // Memory changes require a container restart; CPU changes are handled in-place
        // by updateContainerIfNeeded().
        ContainerResources wantedContainerResources = getContainerResources(context);
        if (!wantedContainerResources.equalsMemory(existingContainer.resources())) {
            reasons.add("Container should be running with different memory allocation, wanted: " +
                    wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources().toStringMemory());
        }

        if (containerState == STARTING) reasons.add("Container failed to start");
        return reasons;
    }

    /**
     * Removes the container: suspends in Orchestrator (unless already suspended), suspends and
     * stops services best-effort, harvests core dumps, then deletes the container and marks
     * containerState ABSENT.
     */
    private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) {
        context.log(logger, "Will remove container: " + String.join(", ", reasons));

        if (existingContainer.state().isRunning()) {
            if (!alreadySuspended) {
                orchestratorSuspendNode(context);
            }

            try {
                if (context.node().state() == NodeState.active) {
                    suspend(context);
                }
                stopServices(context);
            } catch (Exception e) {
                // Stop failures must not block removal of a container we have decided to remove.
                context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e);
            }
        }

        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer), true);
        containerOperations.removeContainer(context, existingContainer);
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
    }

    /**
     * Applies the wanted CPU allocation in-place once the warm-up period (measured from the
     * first successful health check) has passed. Memory is left untouched here — memory
     * changes force a container removal instead (see shouldRemoveContainer).
     */
    private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
        ContainerResources wantedContainerResources = getContainerResources(context);

        // Still within warm-up (or not yet healthy): keep the current (uncapped) CPU allocation.
        if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant
                .map(clock.instant().minus(warmUpDuration(context))::isBefore)
                .orElse(true))
            return existingContainer;

        if (wantedContainerResources.equalsCpu(existingContainer.resources())) return existingContainer;
        context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
                    wantedContainerResources.toStringCpu(), existingContainer.resources().toStringCpu());

        containerOperations.updateContainer(context, existingContainer.id(),
                wantedContainerResources.withMemoryBytes(existingContainer.resources().memoryBytes()));
        return containerOperations.getContainer(context).orElseThrow(() ->
                ConvergenceException.ofError("Did not find container that was just updated"));
    }

    /**
     * Computes the wanted container resources. The CPU cap is the node's vcpu share on this
     * host scaled by the CONTAINER_CPU_CAP feature flag (0 = uncapped, used in dev).
     */
    private ContainerResources getContainerResources(NodeAgentContext context) {
        double cpuCap = noCpuCap(context.zone()) ?
                0 :
                context.vcpuOnThisHost() * containerCpuCap
                        .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm))
                        .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().map(NodeMembership::clusterId))
                        .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().map(membership -> membership.type().value()))
                        .with(FetchVector.Dimension.HOSTNAME, context.node().hostname())
                        .value();

        return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb());
    }

    /** CPU is never capped in the dev environment. */
    private boolean noCpuCap(ZoneApi zone) {
        return zone.getEnvironment() == Environment.dev;
    }

    /**
     * Kicks off an asynchronous image pull if the wanted image differs from the container's.
     * Returns true while a pull is still needed/in progress, false when the image is ready.
     */
    private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) {
        NodeSpec node = context.node();
        if (node.wantedDockerImage().equals(container.map(c -> c.image()))) return false;

        RegistryCredentials credentials = registryCredentialsProvider.get();
        return node.wantedDockerImage()
                .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials))
                .orElse(false);
    }

    /**
     * Handles a pending drop-documents report: removes the container, deletes the search index
     * data, and records droppedAt. Always throws a transient ConvergenceException until the
     * report is marked readied — i.e. the node stays down, waiting for the start signal.
     */
    private void dropDocsIfNeeded(NodeAgentContext context, Optional<Container> container) {
        Optional<DropDocumentsReport> report = context.node().reports()
                .getReport(DropDocumentsReport.reportId(), DropDocumentsReport.class);
        if (report.isEmpty() || report.get().readiedAt() != null) return;

        if (report.get().droppedAt() == null) {
            container.ifPresent(c -> removeContainer(context, c, List.of("Dropping documents"), true));
            FileFinder.from(context.paths().underVespaHome("var/db/vespa/search")).deleteRecursively(context);
            nodeRepository.updateNodeAttributes(context.node().hostname(), new NodeAttributes().withReport(
                    DropDocumentsReport.reportId(), report.get().withDroppedAt(clock.millis()).toJsonNode()));
        }
        throw ConvergenceException.ofTransient("Documents already dropped, waiting for signal to start the container");
    }

    /**
     * Runs one convergence tick, translating exceptions into logs: transient convergence
     * failures are just logged, while errors and unexpected throwables also bump the
     * unhandled-exception counter (surfaced via getAndResetNumberOfUnhandledExceptions()).
     */
    public void converge(NodeAgentContext context) {
        try {
            doConverge(context);
            context.log(logger, Level.INFO, "Converged");
        } catch (ConvergenceException e) {
            context.log(logger, e.getMessage());
            if (e.isError()) numberOfUnhandledException++;
        } catch (Throwable e) {
            numberOfUnhandledException++;
            context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e);
        }
    }

    /** Logs a diff of interesting node-spec fields (currently only state) between ticks. */
    private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
        StringBuilder builder = new StringBuilder();
        appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state);
        if (builder.length() > 0) {
            context.log(logger, Level.INFO, "Changes to node: " + builder);
        }
    }

    /** Renders a possibly-null value for diff logging. */
    private static <T> String fieldDescription(T value) {
        return value == null ? "[absent]" : value.toString();
    }

    /** Appends "name old -> new" to builder when the extracted field differs between specs. */
    private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
        T oldValue = oldNode == null ? null : getter.apply(oldNode);
        T newValue = getter.apply(newNode);
        if (!Objects.equals(oldValue, newValue)) {
            if (builder.length() > 0) {
                builder.append(", ");
            }
            builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
        }
    }

    /**
     * Asks the container runtime for the container unless we already know it is absent;
     * an empty answer is cached as ABSENT (a container never appears without us starting it).
     */
    private Optional<Container> getContainer(NodeAgentContext context) {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = containerOperations.getContainer(context);
        if (container.isEmpty()) containerState = ABSENT;
        return container;
    }

    /** Returns the unhandled-exception count since the last call, resetting it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Asks the Orchestrator for permission to suspend this node. On rejection, re-converges
     * ACLs (the failure may be ACL-related) before rethrowing with the ACL failure suppressed.
     */
    private void orchestratorSuspendNode(NodeAgentContext context) {
        if (context.node().state() != NodeState.active) return;

        context.log(logger, "Ask Orchestrator for permission to suspend node");
        try {
            orchestrator.suspend(context.hostname().value());
            suspendedInOrchestrator = true;
        } catch (OrchestratorException e) {
            // Ensure the ACLs are in sync so that the node can be reached from the config servers
            try {
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            } catch (RuntimeException suppressed) {
                logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed);
                e.addSuppressed(suppressed);
            }
            throw e;
        }
    }

    /** Hook for subclasses to persist extra data into a freshly created container; no-op here. */
    protected void writeContainerData(NodeAgentContext context, ContainerData containerData) { }

    /** Exposes the credentials maintainers to subclasses. */
    protected List<CredentialsMaintainer> credentialsMaintainers() {
        return credentialsMaintainers;
    }

    /**
     * Effective warm-up for this node: negative (disabled) in test environments, for non-tenant
     * nodes, and for tenant nodes whose membership is neither container nor admin; otherwise the
     * configured duration, shortened to a third in CD systems.
     */
    private Duration warmUpDuration(NodeAgentContext context) {
        ZoneApi zone = context.zone();
        Optional<NodeMembership> membership = context.node().membership();
        return zone.getEnvironment().isTest()
                || context.nodeType() != NodeType.tenant
                || membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false)
                ? Duration.ofSeconds(-1)
                : warmUpDuration.dividedBy(zone.getSystemName().isCd() ? 3 : 1);
    }

}
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final ContainerOperations containerOperations; private final RegistryCredentialsProvider registryCredentialsProvider; private final StorageMaintainer storageMaintainer; private final List<CredentialsMaintainer> credentialsMaintainers; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final Clock clock; private final Duration warmUpDuration; private final DoubleFlag containerCpuCap; private final VespaServiceDumper serviceDumper; private final List<ContainerWireguardTask> wireguardTasks; private Thread loopThread; private ContainerState containerState = UNKNOWN; private NodeSpec lastNode; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty(); private boolean suspendedInOrchestrator = false; private int numberOfUnhandledException = 0; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, VespaServiceDumper serviceDumper, List<ContainerWireguardTask> wireguardTasks) { this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider, storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock, DEFAULT_WARM_UP_DURATION, serviceDumper, wireguardTasks); } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration, VespaServiceDumper serviceDumper, List<ContainerWireguardTask> wireguardTasks) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.containerOperations = containerOperations; this.registryCredentialsProvider = registryCredentialsProvider; this.storageMaintainer = storageMaintainer; this.credentialsMaintainers = credentialsMaintainers; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.clock = clock; this.warmUpDuration = warmUpDuration; this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource); this.serviceDumper = serviceDumper; this.wireguardTasks = new ArrayList<>(wireguardTasks); } @Override public void 
start(NodeAgentContext initialContext) { if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent."); loopThread = new Thread(() -> { while (!terminated.get()) { try { converge(contextSupplier.nextContext()); } catch (ContextSupplierInterruptedException ignored) { } } }); loopThread.setName("tick-" + initialContext.hostname()); loopThread.start(); } @Override public void stopForRemoval(NodeAgentContext context) { if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent."); contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); context.log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Invoking vespa-nodectl to start services"); String output = containerOperations.startServices(context); if (!output.isBlank()) { context.log(logger, "Start services output: " + output); } hasStartedServices = true; } } void resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, "Invoking vespa-nodectl to resume services"); String output = containerOperations.resumeNode(context); if (!output.isBlank()) { context.log(logger, "Resume services output: " + output); } hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context, Optional<Instant> containerCreatedAt) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); boolean changed = false; if (context.node().wantedRestartGeneration().isPresent() && !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); changed = true; } boolean createdAtAfterRebootedEvent = 
context.node().events().stream() .filter(event -> event.type().equals("rebooted")) .map(event -> containerCreatedAt .map(createdAt -> createdAt.isAfter(event.at())) .orElse(false)) .findFirst() .orElse(containerCreatedAt.isPresent()); if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration) || createdAtAfterRebootedEvent) { currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); changed = true; } Optional<DockerImage> wantedDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().currentDockerImage(), wantedDockerImage)) { DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = wantedDockerImage.orElse(DockerImage.EMPTY); currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(context.node().currentVespaVersion().orElse(Version.emptyVersion)); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(context.node().wantedVespaVersion().orElse(Version.emptyVersion)); changed = true; } Optional<DropDocumentsReport> report = context.node().reports().getReport(DropDocumentsReport.reportId(), DropDocumentsReport.class); if (report.isPresent() && report.get().startedAt() == null && report.get().readiedAt() != null) { newNodeAttributes.withReport(DropDocumentsReport.reportId(), report.get().withStartedAt(clock.millis()).toJsonNode()); changed = true; } if (changed) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentNodeAttributes, newNodeAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newNodeAttributes); } } private Container startContainer(NodeAgentContext context) { ContainerResources wantedResources = warmUpDuration(context).isNegative() ? 
getContainerResources(context) : getContainerResources(context).withUnlimitedCpus(); ContainerData containerData = containerOperations.createContainer(context, wantedResources); writeContainerData(context, containerData); containerOperations.startContainer(context); currentRebootGeneration = context.node().wantedRebootGeneration(); currentRestartGeneration = context.node().wantedRestartGeneration(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); return containerOperations.getContainer(context).orElseThrow(() -> ConvergenceException.ofError("Did not find container that was just started")); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { List<String> reasons = shouldRemoveContainer(context, existingContainer.get()); if (!reasons.isEmpty()) { removeContainer(context, existingContainer.get(), reasons, false); return Optional.empty(); } shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> { context.log(logger, "Invoking vespa-nodectl to restart services: " + restartReason); orchestratorSuspendNode(context); ContainerResources currentResources = existingContainer.get().resources(); ContainerResources wantedResources = currentResources.withUnlimitedCpus(); if ( ! warmUpDuration(context).isNegative() && ! wantedResources.equals(currentResources)) { context.log(logger, "Updating container resources: %s -> %s", existingContainer.get().resources().toStringCpu(), wantedResources.toStringCpu()); containerOperations.updateContainer(context, existingContainer.get().id(), wantedResources); } String output = containerOperations.restartVespa(context); if ( ! 
// NOTE(review): this span begins mid-method — the lines directly below are the tail of a
// start/restart-services method whose opening is outside this view; left untouched.
output.isBlank()) { context.log(logger, "Restart services output: " + output); }
            currentRestartGeneration = context.node().wantedRestartGeneration();
            firstSuccessfulHealthCheckInstant = Optional.empty(); }); }
    return existingContainer; }

// Returns the reason services should be restarted, or empty when no restart is wanted.
// Only a running container on an active node is considered; a restart is requested when the
// wanted restart generation is ahead of the generation we last acted on.
private Optional<String> shouldRestartServices(NodeAgentContext context, Container existingContainer) {
    NodeSpec node = context.node();
    if (!existingContainer.state().isRunning() || node.state() != NodeState.active) return Optional.empty();
    if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: " +
                currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get());
    }
    return Optional.empty();
}

// Stops services when we have started them but the node no longer has an owner.
private void stopServicesIfNeeded(NodeAgentContext context) {
    if (hasStartedServices && context.node().owner().isEmpty()) stopServices(context);
}

// Stops services in the container (if any) and resets start/resume/health-check bookkeeping.
private void stopServices(NodeAgentContext context) {
    context.log(logger, "Stopping services");
    if (containerState == ABSENT) return;
    hasStartedServices = hasResumedNode = false;
    firstSuccessfulHealthCheckInstant = Optional.empty();
    containerOperations.stopServices(context);
}

@Override
public void stopForHostSuspension(NodeAgentContext context) {
    // Host is suspending: remove the container without asking the orchestrator again
    // (alreadySuspended = true).
    getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true));
}

// Suspends services via vespa-nodectl; failures are logged and swallowed (best effort).
public void suspend(NodeAgentContext context) {
    if (containerState == ABSENT) return;
    try {
        hasResumedNode = false;
        context.log(logger, "Invoking vespa-nodectl to suspend services");
        String output = containerOperations.suspendNode(context);
        if (!output.isBlank()) { context.log(logger, "Suspend services output: " + output); }
    } catch (RuntimeException e) {
        context.log(logger, Level.WARNING, "Failed trying to suspend container", e);
    }
}

// Collects the reasons (possibly none) why the existing container must be removed and recreated.
private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) {
    final NodeState nodeState = context.node().state();
    List<String> reasons = new ArrayList<>();
    if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned)
        reasons.add("Node in state " + nodeState + ", container should no longer be running");
    if (context.node().wantedDockerImage().isPresent() &&
            !context.node().wantedDockerImage().get().equals(existingContainer.image())) {
        reasons.add("The node is supposed to run a new Docker image: " +
                existingContainer.image().asString() + " -> " + context.node().wantedDockerImage().get().asString());
    }
    if (!existingContainer.state().isRunning()) reasons.add("Container no longer running");
    if (currentRebootGeneration < context.node().wantedRebootGeneration()) {
        reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                currentRebootGeneration, context.node().wantedRebootGeneration()));
    }
    // Memory changes require container recreation; CPU-only changes are applied live in
    // updateContainerIfNeeded().
    ContainerResources wantedContainerResources = getContainerResources(context);
    if (!wantedContainerResources.equalsMemory(existingContainer.resources())) {
        reasons.add("Container should be running with different memory allocation, wanted: " +
                wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources().toStringMemory());
    }
    if (containerState == STARTING) reasons.add("Container failed to start");
    return reasons;
}

// Removes the container: if it is still running, suspend (orchestrator unless already done,
// then services), archive any core dumps, delete the container, and mark state ABSENT.
private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) {
    context.log(logger, "Will remove container: " + String.join(", ", reasons));
    if (existingContainer.state().isRunning()) {
        if (!alreadySuspended) { orchestratorSuspendNode(context); }
        try {
            if (context.node().state() == NodeState.active) { suspend(context); }
            stopServices(context);
        } catch (Exception e) {
            context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e);
        }
    }
    storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer), true);
    containerOperations.removeContainer(context, existingContainer);
    containerState = ABSENT;
    context.log(logger, "Container successfully removed, new containerState is " + containerState);
}

// Applies live CPU-allocation changes to the running container. No resize is attempted before
// the first successful health check is older than the warm-up period (or when no health check
// has succeeded yet). Memory is deliberately kept unchanged here — see shouldRemoveContainer().
private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
    ContainerResources wantedContainerResources = getContainerResources(context);
    if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant
            .map(clock.instant().minus(warmUpDuration(context))::isBefore)
            .orElse(true))
        return existingContainer;
    if (wantedContainerResources.equalsCpu(existingContainer.resources())) return existingContainer;
    context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
            wantedContainerResources.toStringCpu(), existingContainer.resources().toStringCpu());
    containerOperations.updateContainer(context, existingContainer.id(),
            wantedContainerResources.withMemoryBytes(existingContainer.resources().memoryBytes()));
    return containerOperations.getContainer(context).orElseThrow(() ->
            ConvergenceException.ofError("Did not find container that was just updated"));
}

// Computes the wanted container resources. The CPU cap comes from the containerCpuCap flag,
// dimensioned by application, cluster id/type and hostname; a cap of 0 means uncapped.
private ContainerResources getContainerResources(NodeAgentContext context) {
    double cpuCap = noCpuCap(context.zone()) ?
            0 : context.vcpuOnThisHost() * containerCpuCap
                    .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm))
                    .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().map(NodeMembership::clusterId))
                    .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().map(membership -> membership.type().value()))
                    .with(FetchVector.Dimension.HOSTNAME, context.node().hostname())
                    .value();
    return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb());
}

// CPU is never capped in dev zones.
private boolean noCpuCap(ZoneApi zone) {
    return zone.getEnvironment() == Environment.dev;
}

// Starts an asynchronous image pull if the wanted image differs from the container's current
// image. Returns true while a pull is in progress.
private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) {
    NodeSpec node = context.node();
    if (node.wantedDockerImage().equals(container.map(c -> c.image()))) return false;
    RegistryCredentials credentials = registryCredentialsProvider.get();
    return node.wantedDockerImage()
            .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials))
            .orElse(false);
}

// Drop-documents flow: when a dropDocuments report exists without readiedAt, remove the
// container, delete the search data directory, record droppedAt in the node repo, and then
// keep throwing a transient convergence exception until readiedAt appears
// (presumably set by the controller once all nodes have dropped — TODO confirm).
private void dropDocsIfNeeded(NodeAgentContext context, Optional<Container> container) {
    Optional<DropDocumentsReport> report = context.node().reports()
            .getReport(DropDocumentsReport.reportId(), DropDocumentsReport.class);
    if (report.isEmpty() || report.get().readiedAt() != null) return;
    if (report.get().droppedAt() == null) {
        container.ifPresent(c -> removeContainer(context, c, List.of("Dropping documents"), true));
        FileFinder.from(context.paths().underVespaHome("var/db/vespa/search")).deleteRecursively(context);
        nodeRepository.updateNodeAttributes(context.node().hostname(),
                new NodeAttributes().withReport(DropDocumentsReport.reportId(),
                        report.get().withDroppedAt(clock.millis()).toJsonNode()));
    }
    throw ConvergenceException.ofTransient("Documents already dropped, waiting for signal to start the container");
}

// Converges the node towards its wanted state; convergence failures are logged, and only
// error-level convergence exceptions (and unexpected throwables) are counted as unhandled.
public void converge(NodeAgentContext context) {
    try {
        doConverge(context);
        context.log(logger, Level.INFO, "Converged");
    } catch (ConvergenceException e) {
        context.log(logger, e.getMessage());
        if (e.isError()) numberOfUnhandledException++;
    } catch (Throwable e) {
        numberOfUnhandledException++;
        context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e);
    }
}

// Logs a diff of interesting node-spec fields (currently only state) between updates.
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
    StringBuilder builder = new StringBuilder();
    appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state);
    if (builder.length() > 0) {
        context.log(logger, Level.INFO, "Changes to node: " + builder);
    }
}

private static <T> String fieldDescription(T value) {
    return value == null ? "[absent]" : value.toString();
}

// Appends "name old -> new" to builder if the field differs between the two specs.
private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
    T oldValue = oldNode == null ? null : getter.apply(oldNode);
    T newValue = getter.apply(newNode);
    if (!Objects.equals(oldValue, newValue)) {
        if (builder.length() > 0) { builder.append(", "); }
        builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
    }
}

// Fetches the container, caching "no container" by flipping containerState to ABSENT.
private Optional<Container> getContainer(NodeAgentContext context) {
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> container = containerOperations.getContainer(context);
    if (container.isEmpty()) containerState = ABSENT;
    return container;
}

@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int temp = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return temp;
}

// Asks the orchestrator for permission to suspend the node. On rejection, re-converges ACLs
// before rethrowing (NOTE(review): presumably to rule out stale ACLs as the cause of the
// orchestrator failure — confirm); any ACL failure is attached as a suppressed exception.
private void orchestratorSuspendNode(NodeAgentContext context) {
    if (context.node().state() != NodeState.active) return;
    context.log(logger, "Ask Orchestrator for permission to suspend node");
    try {
        orchestrator.suspend(context.hostname().value());
        suspendedInOrchestrator = true;
    } catch (OrchestratorException e) {
        try {
            aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
        } catch (RuntimeException suppressed) {
            logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed);
            e.addSuppressed(suppressed);
        }
        throw e;
    }
}

// Hook for subclasses to write files into the container before start; no-op by default.
protected void writeContainerData(NodeAgentContext context, ContainerData containerData) { }

protected List<CredentialsMaintainer> credentialsMaintainers() {
    return credentialsMaintainers;
}

// Warm-up period before live CPU resizing is allowed: disabled (negative duration) for test
// environments, non-tenant nodes, and memberships that are neither container nor admin;
// shortened to a third in CD systems. Note: the method shadows the warmUpDuration field it reads.
private Duration warmUpDuration(NodeAgentContext context) {
    ZoneApi zone = context.zone();
    Optional<NodeMembership> membership = context.node().membership();
    return zone.getEnvironment().isTest()
            || context.nodeType() != NodeType.tenant
            || membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false)
            ? Duration.ofSeconds(-1)
            : warmUpDuration.dividedBy(zone.getSystemName().isCd() ? 3 : 1);
}
}
Consider numReadied = 1, numNoReport = 1, and numStarted = size - 2: why should that state be considered inconsistent?
/**
 * Returns the status of a drop-documents operation for the content/combined clusters of the
 * given deployment, as either a "lastDropped" timestamp (all nodes finished) or a "progress"
 * object, or 409 when the node reports indicate the operation raced a topology change.
 *
 * Only available in manually deployed environments.
 *
 * @param clusterId optional cluster to restrict the status to
 * @throws IllegalArgumentException if the zone is not manually deployed
 * @throws NotExistsException if no matching active content nodes exist
 */
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment, String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId zone = ZoneId.from(environment, region);
    if (!zone.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");

    ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
    NodeFilter filters = NodeFilter.all()
            .states(Node.State.active)
            .applications(applicationId)
            .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, clusterId.map(filters::clusterIds).orElse(filters));
    if (nodes.isEmpty()) {
        throw new NotExistsException("No content nodes found for %s%s in %s".formatted(
                applicationId.toFullString(), clusterId.map(id -> " cluster " + id).orElse(""), zone));
    }

    // Classify each node by how far it has come, judged by its "dropDocuments" report:
    // no report (done, or never started), startedAt set, readiedAt set, droppedAt set,
    // or a report with none of these (the initial state).
    Instant readiedAt = null;
    int numNoReport = 0, numInitial = 0, numDropped = 0, numReadied = 0, numStarted = 0;
    for (Node node : nodes) {
        Inspector report = Optional.ofNullable(node.reports().get("dropDocuments"))
                .map(json -> SlimeUtils.jsonToSlime(json).get()).orElse(null);
        if (report == null) numNoReport++;
        else if (report.field("startedAt").valid()) {
            numStarted++;
            readiedAt = SlimeUtils.instant(report.field("readiedAt"));
        }
        else if (report.field("readiedAt").valid()) numReadied++;
        else if (report.field("droppedAt").valid()) numDropped++;
        else numInitial++;
    }

    // Fix: the previous pairwise predicate flagged legitimate transient mixes as conflicts
    // (e.g. numReadied = 1 together with numNoReport = 1). The actual problem state is nodes
    // still in the early phases (initial/dropped) coexisting with nodes already past them,
    // which happens when the topology changed while documents were being dropped.
    if (numInitial + numDropped > 0 && numNoReport + numReadied + numStarted > 0)
        return ErrorResponse.conflict("Last dropping of documents may have failed to clear all documents due " +
                                      "to concurrent topology changes, consider retrying");

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (numStarted + numNoReport == nodes.size()) {
        // Operation complete (or never run): report when documents were last readied, if known.
        if (readiedAt != null) root.setLong("lastDropped", readiedAt.toEpochMilli());
    } else {
        Cursor progress = root.setObject("progress");
        progress.setLong("total", nodes.size());
        progress.setLong("dropped", numDropped);
        progress.setLong("started", numStarted + numNoReport);
    }
    return new SlimeJsonResponse(slime);
}
(numReadied > 0 && (numNoReport > 0 || numInitial > 0 || numDropped > 0)) ||
/**
 * Reports the status of dropping documents for the given deployment's content (and combined)
 * clusters. Supported only in manually deployed environments. The response carries either a
 * "lastDropped" timestamp when every node is finished, or a "progress" object; a 409 is
 * returned when the node reports suggest the last run raced a topology change.
 */
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment, String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId requestedZone = ZoneId.from(environment, region);
    if ( ! requestedZone.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");

    ApplicationId id = ApplicationId.from(tenant, application, instance);
    NodeFilter baseFilter = NodeFilter.all()
                                      .states(Node.State.active)
                                      .applications(id)
                                      .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined);
    NodeFilter effectiveFilter = clusterId.map(baseFilter::clusterIds).orElse(baseFilter);
    List<Node> contentNodes = controller.serviceRegistry().configServer().nodeRepository().list(requestedZone, effectiveFilter);
    if (contentNodes.isEmpty())
        throw new NotExistsException("No content nodes found for %s%s in %s".formatted(
                id.toFullString(), clusterId.map(cluster -> " cluster " + cluster).orElse(""), requestedZone));

    // Tally how far each node has progressed, judged by which timestamps its
    // "dropDocuments" report carries (no report at all counts as finished/never started).
    Instant lastReadied = null;
    int withoutReport = 0, inInitial = 0, haveDropped = 0, haveReadied = 0, haveStarted = 0;
    for (Node contentNode : contentNodes) {
        Inspector report = Optional.ofNullable(contentNode.reports().get("dropDocuments"))
                                   .map(json -> SlimeUtils.jsonToSlime(json).get())
                                   .orElse(null);
        if (report == null) {
            withoutReport++;
        } else if (report.field("startedAt").valid()) {
            haveStarted++;
            lastReadied = SlimeUtils.instant(report.field("readiedAt"));
        } else if (report.field("readiedAt").valid()) {
            haveReadied++;
        } else if (report.field("droppedAt").valid()) {
            haveDropped++;
        } else {
            inInitial++;
        }
    }

    // Nodes still in the early phases alongside nodes already past them means the
    // operation overlapped a topology change.
    if (inInitial + haveDropped > 0 && withoutReport + haveReadied + haveStarted > 0)
        return ErrorResponse.conflict("Last dropping of documents may have failed to clear all documents due " +
                                      "to concurrent topology changes, consider retrying");

    Slime response = new Slime();
    Cursor root = response.setObject();
    boolean allDone = haveStarted + withoutReport == contentNodes.size();
    if (allDone) {
        if (lastReadied != null) root.setLong("lastDropped", lastReadied.toEpochMilli());
    } else {
        Cursor progress = root.setObject("progress");
        progress.setLong("total", contentNodes.size());
        progress.setLong("dropped", haveDropped);
        progress.setLong("started", haveStarted + withoutReport);
    }
    return new SlimeJsonResponse(response);
}
/**
 * REST handler for the /application/v4 API: tenants, applications, instances, deployments,
 * jobs and related operational endpoints. Dispatches on HTTP method and path, and maps known
 * exception types to appropriate HTTP error responses.
 *
 * NOTE(review): this class continues past the end of this view; handlePOST is truncated below.
 */
class ApplicationApiHandler extends AuditLoggingRequestHandler {

    private static final ObjectMapper jsonMapper = new ObjectMapper();

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    @Override
    public Duration getTimeout() {
        // Generous timeout; some operations handled here (e.g. deployments) are slow.
        return Duration.ofMinutes(20);
    }

    // Entry point: routes by HTTP method, translating exceptions into error responses.
    @Override
    public HttpResponse auditAndHandle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri());
            return switch (request.getMethod()) {
                case GET: yield handleGET(path, request);
                case PUT: yield handlePUT(path, request);
                case POST: yield handlePOST(path, request);
                case PATCH: yield handlePATCH(path, request);
                case DELETE: yield handleDELETE(path, request);
                case OPTIONS: yield handleOPTIONS();
                default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            };
        } catch (RestApiException.Forbidden e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        } catch (RestApiException.Unauthorized e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        } catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        } catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        } catch (ConfigServerException e) {
            // Map config-server error codes onto this API's status codes.
            return switch (e.code()) {
                case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
                default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
            };
        } catch (RuntimeException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    // GET routes. First-match wins; the ".../environment/.../instance/..." forms near the end
    // are legacy path orderings kept for backwards compatibility.
    private HttpResponse handleGET(Path path, HttpRequest request) {
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // NOTE(review): exact duplicate of the route directly above — this branch is unreachable.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // PUT routes: tenant updates, access management, archive access, secret stores and
    // global-rotation overrides.
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // POST routes: creation, submission, deployment triggering and operational actions.
    // "pin" and "platform-pin" are aliases mapping to the same pinned platform deployment
    // (NOTE(review): presumably for backwards compatibility — confirm).
    // NOTE(review): this method continues past the end of this view and is truncated below.
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"),
path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), 
path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), 
path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); List<Application> applications = 
controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Root listing: recursive tenant dump when requested, otherwise a plain resource listing. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request) ? recursiveRoot(request) : new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants (optionally including deleted ones) in the short list form. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Returns the named tenant, or 404 when it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                .map(tenant -> tenant(tenant, request))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Serializes one tenant together with its applications. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns the pending access request and audit log for a cloud tenant.
     * A 404 from the access-control backend is mapped to managedAccess=false rather than an error.
     */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var slime = new Slime();
        var cursor = slime.setObject();
        try {
            var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
            var managedAccess = accessControlService.getManagedAccess(tenant);
            cursor.setBool("managedAccess", managedAccess);
            accessRoleInformation.getPendingRequest()
                    .ifPresent(membershipRequest -> {
                        var requestCursor = cursor.setObject("pendingRequest");
                        requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                        requestCursor.setString("reason", membershipRequest.getReason());
                    });
            var auditLogCursor = cursor.setArray("auditLog");
            accessRoleInformation.getAuditLog()
                    .forEach(auditLogEntry -> {
                        var entryCursor = auditLogCursor.addObject();
                        entryCursor.setString("created", auditLogEntry.getCreationTime());
                        entryCursor.setString("approver", auditLogEntry.getApprover());
                        entryCursor.setString("reason", auditLogEntry.getReason());
                        entryCursor.setString("status", auditLogEntry.getAction());
                    });
        } catch (ZmsClientException e) {
            // Backend has no record for this tenant yet: report unmanaged instead of failing.
            if (e.getErrorCode() == 404) cursor.setBool("managedAccess", false);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Operator-only: files an ssh access request for a cloud tenant. */
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    /**
     * Approves or rejects a pending ssh access request for a cloud tenant.
     * Expiry defaults to one day from now when the request body carries no "expiry" field.
     */
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        var expiry = inspector.field("expiry").valid() ?
                Instant.ofEpochMilli(inspector.field("expiry").asLong()) :
                Instant.now().plus(1, ChronoUnit.DAYS);
        var approve = inspector.field("approve").asBool();
        controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
        return new MessageResponse("OK");
    }

    private HttpResponse addManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, true);
    }

    private HttpResponse removeManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, false);
    }

    /** Toggles managed access for a cloud tenant; maps a backend 404 to a retryable 409. */
    private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            // NOTE(review): "privel" looks like a typo (presumably "privileges") in this user-facing message —
            // confirm no client matches on the exact text before correcting it.
            return ErrorResponse.badRequest("Can only set access privel for cloud tenants");
        try {
            controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
            var slime = new Slime();
            slime.setObject().setBool("managedAccess", managedAccess);
            return new SlimeJsonResponse(slime);
        } catch (ZmsClientException e) {
            if (e.getErrorCode() == 404)
                return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes");
            throw e;
        }
    }

    /** Returns the tenant info object for a cloud tenant; 404 for other tenant types. */
    private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Applies the handler to the named tenant when it is a cloud tenant; 404 otherwise. */
    private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
        return controller.tenants().get(TenantName.from(tenantName))
                .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                .map(tenant -> handler.apply((CloudTenant) tenant))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Serializes the full tenant info (contact, address, billing, contacts); empty object when info is empty. */
    private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
        Slime slime = new Slime();
        Cursor infoCursor = slime.setObject();
        if (!info.isEmpty()) {
            infoCursor.setString("name", info.name());
            infoCursor.setString("email", info.email());
            infoCursor.setString("website", info.website());
            infoCursor.setString("contactName", info.contact().name());
            infoCursor.setString("contactEmail", info.contact().email().getEmailAddress());
            infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified());
            toSlime(info.address(), infoCursor);
            toSlime(info.billingContact(), infoCursor);
            toSlime(info.contacts(), infoCursor);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Serializes the "profile" view of tenant info: contact, company/website, and address. */
    private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
        var slime = new Slime();
        var root = slime.setObject();
        var info = cloudTenant.info();
        if (!info.isEmpty()) {
            var contact = root.setObject("contact");
            contact.setString("name", info.contact().name());
            contact.setString("email", info.contact().email().getEmailAddress());
            contact.setBool("emailVerified", info.contact().email().isVerified());
            var tenant = root.setObject("tenant");
            tenant.setString("company", info.name());
            tenant.setString("website", info.website());
            toSlime(info.address(), root);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Applies the handler to the named tenant and the parsed request body; 404 when the tenant is missing. */
    private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
        return controller.tenants().get(tenantName)
                .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /**
     * Merges the "profile" request body into the tenant's info and stores it.
     * Changing the contact email triggers a verification mail and marks the address unverified.
     */
    private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
        var info = cloudTenant.info();
        var mergedEmail = optional("email", inspector.field("contact"))
                .filter(address -> !address.equals(info.contact().email().getEmailAddress()))
                .map(address -> {
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield 
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true)) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); return oldContacts.ofType(TenantContacts.EmailContact.class) .stream() .filter(contact -> contact.email().getEmailAddress().equals(email)) .findAny() .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email())) .orElseGet(() -> { controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS); return new TenantContacts.EmailContact(audiences, new Email(email, false)); }); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, 
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
// --- tail of devApplicationPackage(): completes the "<app>.<zone>.zip" download response ---
+ zone.value() + ".zip", applicationPackage); }

// Returns the stored diff between the dev application package of the given run and the previous one.
// Throws NotExistsException (-> 404) when no diff is stored for that run number.
private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); }

// Serves a stored application package as a zip download.
// Build selection, in order: explicit numeric 'build' query parameter (must be >= 1), the literal
// "latestDeployed" (latest revision deployed to production), or — when the parameter is absent —
// the latest submitted revision. With 'tests=true' the tester package for that revision is
// returned instead of the application package itself.
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); }

// Returns the stored application-package diff for the given production build number, or 404 when
// none exists. NOTE(review): 'number' is parsed with Long.parseLong without a catch, so a
// non-numeric path segment surfaces as NumberFormatException — confirm upstream error mapping.
private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); }

// Returns the JSON representation of a single application (404 via getApplication when absent).
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); }

// Returns the compile version for the application, optionally pinned to a major version given as
// the (nullable) 'allowMajorParam'; a non-numeric value is rejected with 400.
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); }

// --- head of instance(): continues past this range ("new" is completed by "Slime()" on the next line) ---
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var 
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private void toSlime(Cursor keysArray, Map<PublicKey, ? 
extends Principal> keys) { keys.forEach((key, principal) -> { Cursor keyObject = keysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", principal.getName()); }); } private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withoutDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var awsId = mandatory("awsId", data).asString(); var externalId = mandatory("externalId", data).asString(); var role = mandatory("role", 
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); } private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); } private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + 
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); } private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); } private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? 
"empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); } private Application getApplication(String tenantName, String applicationName) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); return controller.applications().getApplication(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private Instance getInstance(String tenantName, String applicationName, String instanceName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); return controller.applications().getInstance(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id)); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString()); 
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor)); toSlime(node.resources(), nodeObject); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); nodeObject.setBool("down", node.down()); nodeObject.setBool("retired", node.retired() || node.wantToRetire()); nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration()); nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration()); nodeObject.setString("group", node.group()); nodeObject.setLong("index", node.index()); } return new SlimeJsonResponse(slime); } private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id); Slime slime = new Slime(); Cursor clustersObject = slime.setObject().setObject("clusters"); for (Cluster cluster : application.clusters().values()) { Cursor clusterObject = clustersObject.setObject(cluster.id().value()); clusterObject.setString("type", cluster.type().name()); toSlime(cluster.min(), clusterObject.setObject("min")); toSlime(cluster.max(), clusterObject.setObject("max")); if ( ! 
cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize")); toSlime(cluster.current(), clusterObject.setObject("current")); toSlime(cluster.target(), clusterObject.setObject("target")); toSlime(cluster.suggested(), clusterObject.setObject("suggested")); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents")); clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis()); } return new SlimeJsonResponse(slime); } private static String valueOf(Node.State state) { return switch (state) { case failed: yield "failed"; case parked: yield "parked"; case dirty: yield "dirty"; case ready: yield "ready"; case active: yield "active"; case inactive: yield "inactive"; case reserved: yield "reserved"; case provisioned: yield "provisioned"; case breakfixed: yield "breakfixed"; case deprovisioned: yield "deprovisioned"; default: throw new IllegalArgumentException("Unexpected node state '" + state + "'."); }; } static String valueOf(Node.ServiceState state) { switch (state) { case expectedUp: return "expectedUp"; case allowedDown: return "allowedDown"; case permanentlyDown: return "permanentlyDown"; case unorchestrated: return "unorchestrated"; case unknown: break; } return "unknown"; } private static String valueOf(Node.ClusterType type) { return switch (type) { case admin: yield "admin"; case content: yield "content"; case container: yield "container"; case combined: yield "combined"; case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); }; } private static String valueOf(NodeResources.DiskSpeed diskSpeed) { return switch (diskSpeed) { case fast : yield "fast"; case slow : yield "slow"; case any : yield "any"; }; } private static String valueOf(NodeResources.StorageType storageType) { return switch (storageType) { case remote : yield "remote"; case local : yield "local"; case any : yield "any"; }; } private HttpResponse logs(String tenantName, String 
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { try (logStream) { logStream.transferTo(outputStream); } } @Override public long maxPendingBytes() { return 1 << 26; } }; } private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant())); } private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); Instant now = controller.clock().instant(); SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now)); } private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new 
DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName()); controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant())); } private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment); return buildResponseFromSearchNodeMetrics(searchNodeMetrics); } private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { var from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.EPOCH); var until = Optional.ofNullable(request.getProperty("until")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.now(controller.clock())); var application = ApplicationId.from(tenantName, applicationName, instanceName); var zone = requireZone(environment, region); var deployment = new DeploymentId(application, zone); var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment); var slime = new Slime(); var root = slime.setObject(); for (var entry : events.entrySet()) { var serviceRoot = root.setArray(entry.getKey().clusterId().value()); 
scalingEventsToSlime(entry.getValue(), serviceRoot); } return new SlimeJsonResponse(slime); } private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) { try { var jsonObject = jsonMapper.createObjectNode(); var jsonArray = jsonMapper.createArrayNode(); for (SearchNodeMetrics metrics : searchnodeMetrics) { jsonArray.add(metrics.toJson()); } jsonObject.set("metrics", jsonArray); return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject)); } catch (JsonProcessingException e) { log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e); return new JsonResponse(500, ""); } } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); boolean upgradeRevision = ! requestObject.field("skipRevision").asBool(); boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool(); String triggered = reTrigger ? controller.applications().deploymentTrigger() .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform) .stream().map(job -> job.type().jobName()).collect(joining(", ")); String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") + (upgradeRevision ? "" : "revision") + ( ! upgradeRevision && ! upgradePlatform ? " and " : "") + (upgradePlatform ? "" : "platform") + ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : ""); return new MessageResponse(triggered.isEmpty() ? 
"Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); } private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) { var mail = mandatory("mail", inspector).asString(); var type = mandatory("mailType", inspector).asString(); var mailType = switch (type) { case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT; case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS; default -> throw new IllegalArgumentException("Unknown mail type " + type); }; var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType); return pendingVerification.isPresent() ? 
// (tail of a method that begins above this chunk — left untouched)
new MessageResponse("Re-sent verification mail to " + mail) : ErrorResponse.notFoundError("No pending mail verification found for " + mail); }

// Serializes an application overview onto 'object': identity, deployments link, latest version,
// project id, the first instance's in-flight/outstanding change, per-instance details,
// deploy keys, service-quality metrics, activity timestamps and ownership/deployment issue ids.
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // Change status is reported for the first instance only, matching historical response shape.
    application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

// Serializes one instance as part of an application response: change status, change blockers
// from the deployment spec, rotation id, and each deployment (recursively when requested).
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // NOTE(review): 'jobStatus' is computed but not used below — presumably kept for parity
        // with the sibling serializer; confirm before removing.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
        if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); }));
    }
    addRotationId(object, instance);
    // Deployments are ordered by the spec when the instance is declared in it.
    List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); }
    }
}

// Writes the instance's first assigned rotation id, if any, as "rotationId".
private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); }

// Serializes a stand-alone instance response: identity, source/commit of the latest revision,
// change status and blockers, deployments (plus zones with running or expected jobs but no
// deployment yet), deploy keys, metrics, activity and ownership info.
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString());
    application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): unused, as in the sibling serializer above — confirm before removing.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
        if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            // Single-rotation case gets a flat bcpStatus; endpointStatus list is added when not recursing.
            if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); }
        }
        if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); }
    }
    // Also list zones where production jobs or active manual deployments exist but no deployment yet.
    Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> ! instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); });
    // Both the legacy single-key field and the full list are emitted for backwards compatibility.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

// Returns the serialized deployment of the given instance in the given zone; 404 when absent.
// (continues past this block boundary)
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
// (tail of deployment(...), whose signature is in the block above)
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); }

// Serializes a change: platform version and/or application revision, whichever is present.
private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); }

// Serializes a single endpoint: cluster, TLS flag, URL, scope, routing method, legacy flag.
private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); }

// Serializes the full deployment response: identity, endpoints (zone + declared; legacy ones
// only when requested), node/cluster links, versions, timings, enclave account, rotation and
// job status, quota/cost, archive URI, activity and deployment metrics.
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    addAvailabilityZone(response, deployment.zone());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone);
    // Unless legacy endpoints are explicitly requested, hide legacy and direct endpoints.
    if (!legacyEndpoints) { zoneEndpoints = zoneEndpoints.not().legacy().direct(); }
    for (var endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); }
    EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId);
    if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); }
    for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); }
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", application.revisions().get(deployment.revision()).stringId());
    response.setLong("build", deployment.revision().number());
    Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
    response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    // Enclave info is only present when a cloud account is decided for this deployment.
    controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> { Cursor enclave = response.setObject("enclave"); enclave.setString("cloudAccount", cloudAccount.value()); controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> enclave.setString("athensDomain", domain.value())); });
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        if (!deployment.zone().environment().isManuallyDeployed()) {
            DeploymentStatus status = controller.jobController().deploymentStatus(application);
            JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
            Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant())) response.setString("status", "pending"); else response.setString("status", "running"); });
        } else {
            // Manually deployed zones report status from the last deployment run instead.
            var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
            deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); });
        }
    }
    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
    // Enclave deployments key the archive URI by cloud account; others by tenant.
    (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ? controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) : controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false)) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}

// Serializes a rotation state as a "bcpStatus" object.
private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); }

// Serializes per-rotation endpoint status for one deployment.
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } }

// Resolves the monitoring-system URI for a deployment via the zone registry.
private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); }

// Sets the deployment in or out of global rotation; the acting agent is 'operator' for
// operator requests, 'tenant' otherwise. (continues past this block boundary)
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } DeploymentId deploymentId = new DeploymentId(instance.id(), zone); RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; controller.routing().of(deploymentId).setRoutingStatus(status, agent); return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ?
// (tail of setGlobalRotationOverride(...), whose signature is in the block above)
"in" : "out of")); }

// Maps the deployment zone's cloud to its private-endpoint service type name.
private String serviceTypeIn(DeploymentId id) { CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName(); if (CloudName.AWS.equals(cloud)) return "aws-private-link"; if (CloudName.GCP.equals(cloud)) return "gcp-service-connect"; return "unknown"; }

// Lists private-service info per load balancer of the deployment: service id/type,
// allowed URNs and the current VPC endpoint connections with their state.
private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));
    List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId());
    Slime slime = new Slime();
    Cursor lbArray = slime.setObject().setArray("privateServices");
    for (LoadBalancer lb : lbs) {
        Cursor serviceObject = lbArray.addObject();
        serviceObject.setString("cluster", lb.cluster().value());
        lb.service().ifPresent(service -> { serviceObject.setString("serviceId", service.id()); serviceObject.setString("type", serviceTypeIn(id)); Cursor urnsArray = serviceObject.setArray("allowedUrns"); for (AllowedUrn urn : service.allowedUrns()) { Cursor urnObject = urnsArray.addObject(); urnObject.setString("type", switch (urn.type()) { case awsPrivateLink -> "aws-private-link"; case gcpServiceConnect -> "gcp-service-connect"; }); urnObject.setString("urn", urn.urn()); } Cursor endpointsArray = serviceObject.setArray("endpoints"); controller.serviceRegistry().vpcEndpointService() .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount()) .forEach(endpoint -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("endpointId", endpoint.endpointId()); endpointObject.setString("state", endpoint.stateValue().name()); endpointObject.setString("detail", endpoint.stateString()); }); });
    }
    return new SlimeJsonResponse(slime);
}

// Triggers drop-documents for a manually deployed zone, optionally scoped to one cluster.
private HttpResponse dropDocuments(String tenant, String application, String instance, String environment, String region, Optional<ClusterSpec.Id> clusterId) { ZoneId zone = ZoneId.from(environment, region); if (!zone.environment().isManuallyDeployed()) throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments"); ApplicationId applicationId = ApplicationId.from(tenant, application, instance); controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId); return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() + clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone); }

// Returns the global-rotation override status for the deployment's primary rotation endpoint.
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); }

// Returns the rotation status ("bcpStatus") of one deployment, for the rotation matching
// the optional endpoint id.
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); }

// Returns the instance's in-flight change; "pinned" duplicates "platform-pinned" for
// backwards compatibility with older clients.
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); Slime slime = new Slime(); Cursor root = slime.setObject(); if ( ! instance.change().isEmpty()) { instance.change().platform().ifPresent(version -> root.setString("platform", version.toString())); instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString())); root.setBool("pinned", instance.change().isPlatformPinned()); root.setBool("platform-pinned", instance.change().isPlatformPinned()); root.setBool("application-pinned", instance.change().isRevisionPinned()); } return new SlimeJsonResponse(slime); }

// Reports whether the deployment is suspended.
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); }

// Proxies the service's /status page from the config server, forwarding query parameters.
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); }

// Proxies the orchestrator's view of the deployment's service nodes.
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); }

// Proxies the service's /state/v1 API, adding the original request URL as "forwarded-url".
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); }

// Serves application package content for the deployment at the given sub-path.
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); }

// Updates tenant metadata from the request body. (continues past this block boundary)
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
// (tail of updateTenant(...), whose signature is in the block above)
getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); }

// Creates a tenant; in public systems, additionally records the creating user as tenant contact.
private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), new Email(user.email(), true))); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); }

// Creates an application under the tenant, using credentials from the request body.
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); }

// Creates an instance, first creating the application if it does not exist yet.
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); }

/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version means "deploy the current system version".
        if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus);
        // Only operators may force a version that is not active in this system.
        if ( ! versionStatus.isActive(version) && ! isOperator(request)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + versionStatus.versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin) change = change.withPlatformPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    // A missing "build" field (-1) means "use the latest known revision".
    long build = buildField.valid() ? buildField.asLong() : -1;
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ? application.get().revisions().last().get().id() : getRevision(application.get(), build); Change change = Change.of(revision); if (pin) change = change.withRevisionPin(); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); });
    return new MessageResponse(response.toString());
}

// Resolves a build number to a known revision whose package still exists in the store.
private RevisionId getRevision(Application application, long build) { return application.revisions().withPackage().stream() .map(ApplicationVersion::id) .filter(version -> version.number() == build) .findFirst() .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build)) .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); }

// Marks a build as skipped (non-deployable) and cancels any instance change targeting it.
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); RevisionId revision = RevisionId.forProduction(Long.parseLong(build)); controller.applications().lockApplicationOrThrow(id, application -> { controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))); for (Instance instance : application.get().instances().values()) if (instance.change().revision().equals(Optional.of(revision))) controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); }

/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    // 'choice' is mapped to a ChangesToCancel constant, e.g. "all" -> ALL, "platform-pin" -> PLATFORM_PIN.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); });
    return new MessageResponse(response.toString());
}

/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Comma-separated "clusterId" and "documentType" properties narrow the scope; blanks are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList();
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! type.isBlank()) .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName());
    return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : ""));
}

/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters, and the pending/ready entries within each, are emitted in sorted key order.
    reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); });
    return new SlimeJsonResponse(slime);
}

// Serializes a reindexing status entry; every field is optional and omitted when absent.
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); status.cause().ifPresent(cause -> statusObject.setString("cause", cause)); }

// Maps a reindexing state to its wire-format string.
private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; }

/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); }

/** Disables reindexing of an application in a zone. 
*/
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zoneId = requireZone(environment, region);
    controller.applications().disableReindexing(applicationId, zoneId);
    return new MessageResponse("Disabled reindexing of " + applicationId + " in " + zoneId);
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    // Each filter dimension is optional; an absent query property means "match all".
    RestartFilter filter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deployment, filter);
    return new MessageResponse("Requested restart of " + deployment);
}

/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                               requireZone(environment, region));
    controller.applications().setSuspension(deployment, suspend);
    return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deployment);
}

private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && !
isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request)); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if ( ! vespaVersion.isEmpty()) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } DeploymentResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber()); Cursor logArray = root.setArray("prepareMessages"); for (LogEntry logMessage : result.log()) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.epochMillis()); logObject.setString("level", logMessage.level().getName()); logObject.setString("message", logMessage.message()); } return new SlimeJsonResponse(slime); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && ! 
isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! 
run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! 
type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime 
dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final 
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); }
// True when the "recursive" query property requests recursion at tenant level or deeper.
private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); }
// True when recursion is requested at application level or deeper.
private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); }
// True when recursion is requested down to deployment level ("all", "true" or "deployment").
private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); }
// Query flag: restrict listings to production instances only.
private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); }
// Query flag: restrict listings to instances with active deployments.
private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); }
// Query flag: include deleted tenants/applications in listings.
private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); }
// Maps the tenant type to the legacy upper-case string used in API responses (note: athenz -> "ATHENS").
private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; }
// Builds an ApplicationId from the {tenant}/{application}/{instance} path segments.
private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); }
// Resolves the {jobtype} path segment against this system's zone registry.
private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); }
// Builds a RunId from the application id, job type and {number} path segments; throws NumberFormatException on a malformed number.
private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); }
// Handles an application package submission: parses the multipart payload, reads submit options
// (projectId 0 is normalized to 1; a sourceUrl, when given, must carry scheme and host), verifies the
// tenant's plan and identity configuration, creates the application on demand, and delegates to the job controller.
private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); }
// Submits a deployment-removal application package (empty test package, build/project 0), which removes all production deployments.
private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); }
// Adds the cloud-native availability zone to the response object — AWS zones only; a no-op for other clouds.
private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); }
// Resolves and validates a zone from its environment and region path segments.
private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); }
// Validates that the zone exists in this system; the synthetic prod "controller" zone is always accepted.
private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; }
// Parses the multipart request body. When an X-Content-Hash header is present, the body is digested while
// parsing and the SHA-256 digest must match the base64-decoded header value, otherwise the request is rejected.
private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; }
// Finds the rotation matching the given endpoint id, or the instance's single rotation when no id is given.
// Throws NotExistsException when the instance has no rotations or the endpoint id is unknown, and
// IllegalArgumentException when multiple rotations exist but no endpoint id was supplied.
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); }
// Maps a rotation state to its API string form.
private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; }
// Maps an endpoint scope to its API string form.
private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; }
// Maps a routing method to its API string form.
private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; }
// Reads a typed attribute from the JDisc request context; throws IllegalArgumentException when absent or of the wrong type.
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); }
// Creates the application on demand in public systems or for Okta-authenticated requests;
// otherwise the application must already exist and an IllegalArgumentException is thrown.
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } }
// True when the request context carries Okta OAuth credentials (detection by attempted extraction).
private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } }
// Sorts deployments by the order their production zones appear in the deployment spec
// (deployments in zones not listed sort first, since indexOf yields -1); returns an unmodifiable list.
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
/**
 * REST handler for the /application/v4 API: dispatches HTTP requests to tenant, application,
 * instance and deployment operations on the controller. Audit logging is provided by the superclass.
 */
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; 
// Injected with the controller and the access-control request facade; wires the audit logger into the superclass.
@Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } 
// Generous request timeout (20 minutes) to accommodate long-running operations such as deploys and submissions.
@Override public Duration getTimeout() { return Duration.ofMinutes(20); } 
// Entry point: dispatches on the HTTP method and maps known exception types to the corresponding
// HTTP error responses (403/401/404/400); ConfigServerException codes are translated case by case,
// and any other RuntimeException is logged and returned as an internal error.
@Override public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e); default -> new 
ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } 
// GET: read-only resources — tenants, applications, instances, jobs, deployments and their
// sub-resources. First matching route wins; unmatched paths yield 404.
private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), 
request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), 
Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); 
// NOTE(review): the following route check is byte-identical to the previous one — this branch is unreachable dead code; consider removing.
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), 
path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return 
ErrorResponse.notFoundError("Nothing at " + path); } 
// PUT: updates to tenants (info, access, archive access, secret stores) and global-rotation overrides.
private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } 
// POST: creation and actions — tenants, applications, instances, keys, submissions, deploys,
// job triggers, restarts, reindexing, suspension and support access.
// NOTE(review): "deploying/pin" dispatches identically to "deploying/platform-pin" — presumably a legacy alias; confirm.
private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return 
addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return 
suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), 
path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } 
// PATCH: partial updates to an application.
// NOTE(review): the instance-level PATCH route ignores the {instance} segment and patches the application — confirm intentional.
private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } 
// DELETE: removal of tenants, keys, secret stores, applications, instances, deployments,
// change cancellations, job aborts/resumes and override/support-access revocation.
private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), 
path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), 
path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } 
// OPTIONS: advertises the supported methods via the Allow header.
private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } 
// Recursive root listing of all tenants/applications (body continues beyond this excerpt).
private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); List<Application> applications = 
controller.applications().asList(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ? recursiveRoot(request) : new ResourceResponse(request, "tenant"); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request)) .map(tenant -> tenant(tenant, request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request); return new SlimeJsonResponse(slime); } private HttpResponse accessRequests(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var accessControlService = controller.serviceRegistry().accessControlService(); var slime = new Slime(); var cursor = slime.setObject(); try { var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant); var managedAccess = accessControlService.getManagedAccess(tenant); cursor.setBool("managedAccess", managedAccess); accessRoleInformation.getPendingRequest() .ifPresent(membershipRequest -> { var requestCursor = 
cursor.setObject("pendingRequest"); requestCursor.setString("requestTime", membershipRequest.getCreationTime()); requestCursor.setString("reason", membershipRequest.getReason()); }); var auditLogCursor = cursor.setArray("auditLog"); accessRoleInformation.getAuditLog() .forEach(auditLogEntry -> { var entryCursor = auditLogCursor.addObject(); entryCursor.setString("created", auditLogEntry.getCreationTime()); entryCursor.setString("approver", auditLogEntry.getApprover()); entryCursor.setString("reason", auditLogEntry.getReason()); entryCursor.setString("status", auditLogEntry.getAction()); }); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) cursor.setBool("managedAccess", false); } return new SlimeJsonResponse(slime); } private HttpResponse requestSshAccess(String tenantName, HttpRequest request) { if (!isOperator(request)) { return ErrorResponse.forbidden("Only operators are allowed to request ssh access"); } if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only request access for cloud tenants"); controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName)); return new MessageResponse("OK"); } private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var inspector = toSlime(request.getData()).get(); var expiry = inspector.field("expiry").valid() ? 
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); try { controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes"); throw e; } } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support 
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield 
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact()))
                .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts()));

        validateMergedTenantInfo(mergedInfo);

        // Persist under the tenant lock so concurrent updates do not lose fields.
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });

        return new MessageResponse("Tenant info updated");
    }

    // Merges the submitted address fields over the old address. The merged address is valid
    // only when all fields are blank (cleared) or all fields are set — anything in between throws.
    private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
        if (!insp.valid()) return oldAddress;
        TenantAddress address = TenantAddress.empty()
                .withCountry(getString(insp.field("country"), oldAddress.country()))
                .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
                .withCity(getString(insp.field("city"), oldAddress.city()))
                .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
                .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
        List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region());
        if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
            return address;
        throw new IllegalArgumentException("All address fields must be set");
    }

    // Merges the submitted contact fields over the old contact. A changed email on a billing
    // contact is accepted as verified immediately; any other changed email triggers a
    // verification mail and is stored as unverified until confirmed.
    private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) {
        if (!insp.valid()) return oldContact;
        var mergedEmail = optional("email", insp)
                .filter(address -> !address.equals(oldContact.email().getEmailAddress()))
                .map(address -> {
                    if (isBillingContact)
                        return new Email(address, true);
                    controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT);
                    return new Email(address, false);
                })
                .orElse(oldContact.email());
        return TenantContact.empty()
                .withName(getString(insp.field("name"), oldContact.name()))
                .withEmail(mergedEmail)
                .withPhone(getString(insp.field("phone"), oldContact.phone()));
    }

    // Merges the submitted billing contact (contact + address) over the old one.
    private TenantBilling
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) {
        if (!insp.valid()) return oldContact;
        return TenantBilling.empty()
                .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    }

    // Replaces the contact list with the submitted entries. An entry whose email matches an
    // existing contact keeps that contact's (possibly verified) email; a new email gets a
    // NOTIFICATIONS verification mail and starts unverified.
    private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) {
        if (!insp.valid()) return oldContacts;
        List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
            String email = inspector.field("email").asString().trim();
            List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                    .map(audience -> fromAudience(audience.asString()))
                    .toList();
            return oldContacts.ofType(TenantContacts.EmailContact.class)
                    .stream()
                    .filter(contact -> contact.email().getEmailAddress().equals(email))
                    .findAny()
                    .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email()))
                    .orElseGet(() -> {
                        controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS);
                        return new TenantContacts.EmailContact(audiences, new Email(email, false));
                    });
        }).toList();
        return new TenantContacts(contacts);
    }

    // Lists notifications, optionally scoped to one tenant, filtered by the request's
    // query properties (application, instance, zone, job, type, level).
    private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
        boolean productionOnly = showOnlyProductionInstances(request);
        boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
        Slime slime = new Slime();
        Cursor notificationsArray = slime.setObject().setArray("notifications");
        tenant.map(t -> Stream.of(TenantName.from(t)))
                .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
                .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
                .filter(notification -> propertyEquals(request,
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var 
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
    try {
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        // Config server returns raw JSON; re-parse so it can be embedded as a sub-object.
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        return ErrorResponses.logThrowing(request, log, e);
    }
}

// Removes a developer public key (PEM) from a cloud tenant; responds with remaining keys.
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}

// Serializes a key -> principal map as an array of {key, user} objects.
private void toSlime(Cursor keysArray, Map<PublicKey, ?
        extends Principal> keys) {
    keys.forEach((key, principal) -> {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(key));
        keyObject.setString("user", principal.getName());
    });
}

// Adds a deploy key (PEM) to the application; responds with the updated key set.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                .map(KeyUtils::toPem)
                .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

// Removes a deploy key (PEM) from the application; responds with the remaining keys.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                .map(KeyUtils::toPem)
                .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

// Registers a new AWS-backed secret store for a cloud tenant. Expects JSON fields
// awsId, externalId and role; also creates the corresponding tenant IAM policy.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role",
data).asString();
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read the tenant so the response reflects the stored state.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

// Deletes a named secret store from the tenant, tearing down the secret-service entry
// and the tenant IAM policy before persisting the tenant change.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();
    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    var tenantSecretStore = optionalSecretStore.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant =
                lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

// Sets the AWS role allowed to access this tenant's archived data. Cloud tenants only.
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var role = mandatory("role", data).asString();
    if (role.isBlank()) {
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}

// Clears the AWS archive access role for the tenant. Cloud tenants only.
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}

// Sets the GCP member allowed to access this tenant's archived data. Cloud tenants only.
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" +
tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}

// Clears the GCP archive access member for the tenant. Cloud tenants only.
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}

// Applies a JSON patch to application-level settings (majorVersion, pemDeployKey)
// and responds with a summary of the applied changes.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A majorVersion of 0 means "clear the pinned major version".
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ?
                    "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}

// Looks up the application, throwing NotExistsException when absent.
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(applicationId)
            .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

// Looks up the instance, throwing NotExistsException when absent.
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(applicationId)
            .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

// Lists the nodes allocated to a deployment, with per-node status flags.
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        // "restarting"/"rebooting" mean a wanted generation is ahead of the current one.
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}

// Serializes autoscaling status for every cluster of a deployment: min/max/current/
// target/suggested resources, optional group size, scaling events and duration.
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        if ( !
                cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        toSlime(cluster.target(), clusterObject.setObject("target"));
        toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
    }
    return new SlimeJsonResponse(slime);
}

// Maps node state to its wire name; throws for states not covered here.
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed: yield "failed";
        case parked: yield "parked";
        case dirty: yield "dirty";
        case ready: yield "ready";
        case active: yield "active";
        case inactive: yield "inactive";
        case reserved: yield "reserved";
        case provisioned: yield "provisioned";
        case breakfixed: yield "breakfixed";
        case deprovisioned: yield "deprovisioned";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}

// Maps orchestration state to its wire name; 'unknown' falls through to "unknown".
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: return "expectedUp";
        case allowedDown: return "allowedDown";
        case permanentlyDown: return "permanentlyDown";
        case unorchestrated: return "unorchestrated";
        case unknown: break;
    }
    return "unknown";
}

// Maps cluster type to its wire name; 'unknown' is rejected.
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin: yield "admin";
        case content: yield "content";
        case container: yield "container";
        case combined: yield "combined";
        case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}

// Maps disk speed to its wire name.
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast : yield "fast";
        case slow : yield "slow";
        case any : yield "any";
    };
}

// Maps storage type to its wire name.
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote : yield "remote";
        case local : yield "local";
        case any : yield "any";
    };
}

// Streams log data for a deployment from the config server, honoring query filters.
private HttpResponse logs(String tenantName, String
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // try-with-resources closes the upstream log stream once copied to the client.
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }
        @Override
        public long maxPendingBytes() {
            return 1 << 26;
        }
    };
}

// Returns the current support access state for a deployment.
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}

// Grants support access to a deployment for 7 days, recorded against the calling user.
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}

// Revokes support access and re-triggers deployment so the revocation takes effect.
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new
            DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}

// Fetches per-search-node metrics for a deployment from the config server.
private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment);
    return buildResponseFromSearchNodeMetrics(searchNodeMetrics);
}

// Lists scaling events per cluster within an optional [from, until] window
// (epoch seconds; defaults to [EPOCH, now]).
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var from = Optional.ofNullable(request.getProperty("from"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.EPOCH);
    var until = Optional.ofNullable(request.getProperty("until"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.now(controller.clock()));
    var application = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    var deployment = new DeploymentId(application, zone);
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    var slime = new Slime();
    var root = slime.setObject();
    for (var entry : events.entrySet()) {
        var serviceRoot = root.setArray(entry.getKey().clusterId().value());
scalingEventsToSlime(entry.getValue(), serviceRoot);
    }
    return new SlimeJsonResponse(slime);
}

// Wraps search-node metrics in a {"metrics": [...]} JSON response; 500 on failure.
private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
    try {
        var jsonObject = jsonMapper.createObjectNode();
        var jsonArray = jsonMapper.createArrayNode();
        for (SearchNodeMetrics metrics : searchnodeMetrics) {
            jsonArray.add(metrics.toJson());
        }
        jsonObject.set("metrics", jsonArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}

// Manually (re-)triggers a job. JSON flags: skipTests, reTrigger, skipRevision,
// skipUpgrade. Responds with the triggered job name(s), or a not-triggered message.
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    String triggered = reTrigger
            ? controller.applications().deploymentTrigger()
                    .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
            : controller.applications().deploymentTrigger()
                    .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                    .stream().map(job -> job.type().jobName()).collect(joining(", "));
    // Builds a human-readable suffix such as ", without revision and platform upgrade".
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ?
            "Job " + type.jobName() + " for " + id + " not triggered" :
            "Triggered " + triggered + " for " + id + suppressedUpgrades);
}

// Pauses a job for the maximum allowed pause duration.
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}

// Resumes a previously paused job.
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}

// Re-sends a pending verification mail of the given type ("contact" or "notifications").
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
    var mail = mandatory("mail", inspector).asString();
    var type = mandatory("mailType", inspector).asString();
    var mailType = switch (type) {
        case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT;
        case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown mail type " + type);
    };
    var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType);
    return pendingVerification.isPresent() ?
new MessageResponse("Re-sent verification mail to " + mail) :
            ErrorResponse.notFoundError("No pending mail verification found for " + mail);
}

// Serializes an application overview: deployments link, latest version, change status,
// per-instance summaries, deploy keys, service metrics and activity.
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/", request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // deploying/outstandingChange are reported from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ?
            application.productionInstances().values() :
            application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

// Serializes one instance inside an application overview: change status, change
// blockers, rotation id and a deployment list (recursed when the request asks for it).
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus appears unused in this method as visible here.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( !
status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    addRotationId(object, instance);
    // Deployments are listed in deployment-spec order when the instance is declared in the spec.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
            .map(spec -> sortedDeployments(instance.deployments().values(), spec))
            .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && !
                instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            addAvailabilityZone(deploymentObject, deployment.zone());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}

// Adds the first assigned rotation id, if any, to the object.
private void addRotationId(Cursor object, Instance instance) {
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}

// Serializes a full instance view: application metadata, change status, change
// blockers, deployments (plus declared-but-missing zones), keys, metrics and activity.
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.revisions().last().ifPresent(version -> {
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus appears unused in this method as visible here.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
        if ( !
                instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    addRotationId(object, instance);
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
            .map(spec -> sortedDeployments(instance.deployments().values(), spec))
            .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && !
                    instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            addAvailabilityZone(deploymentObject, deployment.zone());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list zones that are declared (or manually deployed to) but have no deployment yet.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                        .map(run -> run.id().job())
                        .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> !
                  instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

// Serializes a single deployment, or 404 when the instance is not deployed in the zone.
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
            .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Renders the given change: its platform version and/or its application revision. */
    private void toSlime(Cursor object, Change change, Application application) {
        change.platform().ifPresent(version -> object.setString("version", version.toString()));
        change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                                      application.revisions().get(revision)));
    }

    /** Renders a single endpoint: cluster, TLS, URL, scope, routing method and legacy flag. */
    private void toSlime(Endpoint endpoint, Cursor object) {
        object.setString("cluster", endpoint.cluster().value());
        object.setBool("tls", endpoint.tls());
        object.setString("url", endpoint.url().toString());
        object.setString("scope", endpointScopeString(endpoint.scope()));
        object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
        object.setBool("legacy", endpoint.legacy());
    }

    /**
     * Renders the full deployment view: identity, endpoints, versions, deployment job status,
     * quota, archive URI, activity and metrics.
     */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        addAvailabilityZone(response, deployment.zone());
        var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

        // Legacy and non-direct endpoints are filtered out unless explicitly requested.
        boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
        var endpointArray = response.setArray("endpoints");
        EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                               .scope(Endpoint.Scope.zone);
        if (!legacyEndpoints) {
            zoneEndpoints = zoneEndpoints.not().legacy().direct();
        }
        for (var endpoint : zoneEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }
        EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                                   .targets(deploymentId);
        if (!legacyEndpoints) {
            declaredEndpoints = declaredEndpoints.not().legacy().direct();
        }
        for (var endpoint : declaredEndpoints) {
            toSlime(endpoint, endpointArray.addObject());
        }
        response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
        response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", application.revisions().get(deployment.revision()).stringId());
        response.setLong("build", deployment.revision().number());
        Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
        response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
        application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

        // "enclave" is only rendered when the deployment resolves to an explicit cloud account.
        controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
            Cursor enclave = response.setObject("enclave");
            enclave.setString("cloudAccount", cloudAccount.value());
            controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> enclave.setString("athensDomain", domain.value()));
        });

        var instance = application.instances().get(deploymentId.applicationId().instance());
        if (instance != null) {
            if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
            if (!deployment.zone().environment().isManuallyDeployed()) {
                // Orchestrated deployments: derive "status" from the corresponding deployment job step.
                DeploymentStatus status = controller.jobController().deploymentStatus(application);
                JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
                Optional.ofNullable(status.jobSteps().get(jobId))
                        .ifPresent(stepStatus -> {
                            JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                            if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                                response.setString("status", "complete");
                            else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant()))
                                response.setString("status", "pending");
                            else
                                response.setString("status", "running");
                        });
            } else {
                // Manual deployments: "status" is simply whether the last run has ended.
                var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
                deploymentRun.ifPresent(run -> {
                    response.setString("status", run.hasEnded() ? "complete" : "running");
                });
            }
        }
        response.setDouble("quota", deployment.quota().rate());
        deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
        // Enclave deployments look up the archive URI by cloud account; all others by tenant.
        (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ?
                controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) :
                controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
                .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }

    /** Renders the BCP rotation state of a deployment. */
    private void toSlime(RotationState state, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        bcpStatus.setString("rotationStatus", rotationStateString(state));
    }

    private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
        var array = object.setArray("endpointStatus");
        for (var rotation : rotations) {
            var statusObject = array.addObject();
            var targets = status.of(rotation.rotationId());
            statusObject.setString("endpointId", rotation.endpointId().id());
            statusObject.setString("rotationId", rotation.rotationId().asString());
statusObject.setString("clusterId", rotation.clusterId().value());
            statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
            statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
        }
    }

    /** Returns the monitoring dashboard URI for the given deployment. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }

    /** Sets the global rotation override (routing status) of a deployment in or out of service. */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        ZoneId zone = requireZone(environment, region);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
        // Record who set the override: operator and tenant agents are distinguished.
        RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
        RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
        controller.routing().of(deploymentId).setRoutingStatus(status, agent);
        return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                               instance.id().toShortString(), zone, inService ?
"in" : "out of")); } private String serviceTypeIn(DeploymentId id) { CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName(); if (CloudName.AWS.equals(cloud)) return "aws-private-link"; if (CloudName.GCP.equals(cloud)) return "gcp-service-connect"; return "unknown"; } private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId()); Slime slime = new Slime(); Cursor lbArray = slime.setObject().setArray("privateServices"); for (LoadBalancer lb : lbs) { Cursor serviceObject = lbArray.addObject(); serviceObject.setString("cluster", lb.cluster().value()); lb.service().ifPresent(service -> { serviceObject.setString("serviceId", service.id()); serviceObject.setString("type", serviceTypeIn(id)); Cursor urnsArray = serviceObject.setArray("allowedUrns"); for (AllowedUrn urn : service.allowedUrns()) { Cursor urnObject = urnsArray.addObject(); urnObject.setString("type", switch (urn.type()) { case awsPrivateLink -> "aws-private-link"; case gcpServiceConnect -> "gcp-service-connect"; }); urnObject.setString("urn", urn.urn()); } Cursor endpointsArray = serviceObject.setArray("endpoints"); controller.serviceRegistry().vpcEndpointService() .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount()) .forEach(endpoint -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("endpointId", endpoint.endpointId()); endpointObject.setString("state", endpoint.stateValue().name()); endpointObject.setString("detail", endpoint.stateString()); }); }); } return new SlimeJsonResponse(slime); } private HttpResponse dropDocuments(String tenant, String application, String instance, String 
environment, String region, Optional<ClusterSpec.Id> clusterId) {
        ZoneId zone = ZoneId.from(environment, region);
        // Dropping documents is an escape hatch for manually deployed (dev/perf) environments only.
        if (!zone.environment().isManuallyDeployed())
            throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");
        ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
        controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId);
        return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() +
                                   clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone);
    }

    /** Returns the global rotation override (routing status) for the deployment's primary rotation endpoint, if any. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        Slime slime = new Slime();
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                       .requiresRotation()
                                                       .primary();
        if (primaryEndpoint.isPresent()) {
            DeploymentRoutingContext context = controller.routing().of(deploymentId);
            RoutingStatus status = context.routingStatus();
            // Upstream name first, then a status object — preserves the historical response layout.
            array.addString(primaryEndpoint.get().upstreamName(deploymentId));
            Cursor statusObject = array.addObject();
            statusObject.setString("status", status.value().name());
            statusObject.setString("reason", "");
            statusObject.setString("agent", status.agent().name());
            statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
        }
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance =
controller.applications().requireInstance(applicationId);
        ZoneId zone = requireZone(environment, region);
        RotationId rotation = findRotationId(instance, endpointId);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(instance.rotationStatus().of(rotation, deployment), response);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the currently ongoing change (platform version and/or application revision) for the instance, with pin status. */
    private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        if ( ! instance.change().isEmpty()) {
            instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
            instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
            // "pinned" mirrors "platform-pinned" and is kept for backwards compatibility.
            root.setBool("pinned", instance.change().isPlatformPinned());
            root.setBool("platform-pinned", instance.change().isPlatformPinned());
            root.setBool("application-pinned", instance.change().isRevisionPinned());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns whether orchestration of the given deployment is currently suspended. */
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        boolean suspended = controller.applications().isSuspended(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setBool("suspended", suspended);
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath,
HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); } private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { 
getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), new Email(user.email(), true))); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId 
applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        // Implicitly create the application first if it does not yet exist.
        if (controller.applications().getApplication(applicationId).isEmpty())
            createApplication(tenantName, applicationName, request);
        controller.applications().createInstance(applicationId.instance(instanceName));
        Slime slime = new Slime();
        toSlime(applicationId.instance(instanceName), slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
    private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            Version version = Version.fromString(versionString);
            VersionStatus versionStatus = controller.readVersionStatus();
            // The empty version means "use the current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion(versionStatus);
            // Only operators may force a version which is not active in this system.
            if ( ! versionStatus.isActive(version) && ! isOperator(request))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + versionStatus.versions()
                                                                                      .stream()
                                                                                      .map(VespaVersion::versionNumber)
                                                                                      .map(Version::toString)
                                                                                      .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPlatformPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /** Trigger deployment to the last known application package for the given application.
*/
    private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Inspector buildField = toSlime(request.getData()).get().field("build");
        long build = buildField.valid() ? buildField.asLong() : -1;  // -1: use the latest known revision
        StringBuilder response = new StringBuilder();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                              : getRevision(application.get(), build);
            Change change = Change.of(revision);
            if (pin)
                change = change.withRevisionPin();
            controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
            response.append("Triggered ").append(change).append(" for ").append(id);
        });
        return new MessageResponse(response.toString());
    }

    /**
     * Returns the production revision with the given build number, requiring that its package is still stored.
     *
     * @throws IllegalArgumentException if no such deployable build exists
     */
    private RevisionId getRevision(Application application, long build) {
        return application.revisions().withPackage().stream()
                          .map(ApplicationVersion::id)
                          .filter(version -> version.number() == build)
                          .findFirst()
                          .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build))
                          .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
    }

    private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
        controller.applications().lockApplicationOrThrow(id, application -> {
            controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
            // Also cancel any in-flight change targeting this revision, in every instance.
            for (Instance instance : application.get().instances().values())
                if (instance.change().revision().equals(Optional.of(revision)))
controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! 
type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName()); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone. */ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = 
readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); } void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); status.cause().ifPresent(cause -> statusObject.setString("cause", cause)); } private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; } /** Enables reindexing of an application in a zone. */ private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); } /** Disables reindexing of an application in a zone. 
*/
    private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().disableReindexing(id, zone);
        return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        // Each filter dimension is optional; an absent request property leaves that dimension unconstrained.
        RestartFilter restartFilter = new RestartFilter()
                .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
                .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
                .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        controller.applications().restart(deploymentId, restartFilter);
        return new MessageResponse("Requested restart of " + deploymentId);
    }

    /** Set suspension status of the given deployment. */
    private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        controller.applications().setSuspension(deploymentId, suspend);
        return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
    }

    private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
        if ( ! type.environment().isManuallyDeployed() && !
isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request)); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if ( ! vespaVersion.isEmpty()) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } DeploymentResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber()); Cursor logArray = root.setArray("prepareMessages"); for (LogEntry logMessage : result.log()) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.epochMillis()); logObject.setString("level", logMessage.level().getName()); logObject.setString("message", logMessage.message()); } return new SlimeJsonResponse(slime); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && ! 
isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! 
/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // If the given instance is not declared in deployment.xml, fall back to the default
    // instance for production deployment data.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    // All production deployments of the chosen instance; mutable set because a
    // non-production deployment may be added below.
    HashSet<DeploymentId> deployments = controller.applications()
            .getInstance(prodInstanceId).stream()
            .flatMap(instance -> instance.productionDeployments().keySet().stream())
            .map(zone -> new DeploymentId(prodInstanceId, zone))
            .collect(Collectors.toCollection(HashSet::new));
    // Production tests run against the (possibly substituted) production instance;
    // other job types test the instance that was actually asked for.
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    if ( ! type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone()));
    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone());
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  deployment.version(),
                                                                  deployment.revision(),
                                                                  deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}
dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final 
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
"Last dropping of documents may have failed to clear all documents due to concurrent topology changes, consider retrying."
/**
 * Returns the status of an ongoing or completed drop-documents operation for the content/combined
 * clusters of the given deployment, as a JSON response: either "lastDropped" (epoch millis) when
 * all nodes are done, or a "progress" object with "total"/"dropped"/"started" counts.
 *
 * Only available for manually deployed environments. Throws NotExistsException when the
 * deployment has no matching content nodes.
 */
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment,
                                         String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId zone = ZoneId.from(environment, region);
    if (!zone.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");

    ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
    NodeFilter filters = NodeFilter.all()
                                   .states(Node.State.active)
                                   .applications(applicationId)
                                   .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository()
                                 .list(zone, clusterId.map(filters::clusterIds).orElse(filters));
    if (nodes.isEmpty()) {
        throw new NotExistsException("No content nodes found for %s%s in %s".formatted(
                applicationId.toFullString(), clusterId.map(id -> " cluster " + id).orElse(""), zone));
    }

    // Classify each node by how far it has progressed through the drop-documents workflow, based
    // on which timestamp fields its "dropDocuments" report contains.
    Instant readiedAt = null;
    int numNoReport = 0, numInitial = 0, numDropped = 0, numReadied = 0, numStarted = 0;
    for (Node node : nodes) {
        Inspector report = Optional.ofNullable(node.reports().get("dropDocuments"))
                                   .map(json -> SlimeUtils.jsonToSlime(json).get()).orElse(null);
        if (report == null) numNoReport++;
        else if (report.field("startedAt").valid()) {
            numStarted++;
            readiedAt = SlimeUtils.instant(report.field("readiedAt"));
        }
        else if (report.field("readiedAt").valid()) numReadied++;
        else if (report.field("droppedAt").valid()) numDropped++;
        else numInitial++;
    }

    // Fix: a previous run may have failed to clear everything when some nodes are still in an
    // early phase (no drop yet, or only dropped) while others have already moved past it (readied,
    // restarted, or report cleared). The old pairwise condition missed combinations (e.g. dropped
    // mixed with no-report) and its message gave the user no action to take.
    if (numInitial + numDropped > 0 && numNoReport + numReadied + numStarted > 0)
        return ErrorResponse.conflict("Last dropping of documents may have failed to clear all documents due " +
                                      "to concurrent topology changes, consider retrying");

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (numStarted + numNoReport == nodes.size()) {
        // All nodes have been restarted (or their report already cleaned up): report completion time.
        if (readiedAt != null) root.setLong("lastDropped", readiedAt.toEpochMilli());
    } else {
        Cursor progress = root.setObject("progress");
        progress.setLong("total", nodes.size());
        progress.setLong("dropped", numDropped);
        progress.setLong("started", numStarted + numNoReport);
    }
    return new SlimeJsonResponse(slime);
}
return ErrorResponse.conflict("Inconsistent state, try restarting drop documents again");
/**
 * Reports drop-documents status for the content/combined clusters of a manually deployed
 * deployment: a conflict when a previous run looks inconsistent, "lastDropped" when every node
 * is done, and a "progress" object with counts otherwise.
 */
private HttpResponse dropDocumentsStatus(String tenant, String application, String instance, String environment,
                                         String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId zone = ZoneId.from(environment, region);
    if (!zone.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");

    ApplicationId id = ApplicationId.from(tenant, application, instance);
    NodeFilter nodeFilter = NodeFilter.all()
                                      .states(Node.State.active)
                                      .applications(id)
                                      .clusterTypes(Node.ClusterType.content, Node.ClusterType.combined);
    List<Node> contentNodes = controller.serviceRegistry().configServer().nodeRepository()
                                        .list(zone, clusterId.map(nodeFilter::clusterIds).orElse(nodeFilter));
    if (contentNodes.isEmpty()) {
        throw new NotExistsException("No content nodes found for %s%s in %s".formatted(
                id.toFullString(), clusterId.map(c -> " cluster " + c).orElse(""), zone));
    }

    // Bucket every node by the furthest phase recorded in its "dropDocuments" report.
    Instant lastReadiedAt = null;
    int withoutReport = 0, beforeDrop = 0, afterDrop = 0, afterReady = 0, afterStart = 0;
    for (Node contentNode : contentNodes) {
        Inspector dropReport = Optional.ofNullable(contentNode.reports().get("dropDocuments"))
                                       .map(json -> SlimeUtils.jsonToSlime(json).get())
                                       .orElse(null);
        if (dropReport == null) withoutReport++;
        else if (dropReport.field("startedAt").valid()) {
            afterStart++;
            lastReadiedAt = SlimeUtils.instant(dropReport.field("readiedAt"));
        }
        else if (dropReport.field("readiedAt").valid()) afterReady++;
        else if (dropReport.field("droppedAt").valid()) afterDrop++;
        else beforeDrop++;
    }

    // Early-phase nodes coexisting with late-phase nodes suggests a prior run did not finish cleanly.
    boolean anyEarlyPhase = beforeDrop + afterDrop > 0;
    boolean anyLatePhase = withoutReport + afterReady + afterStart > 0;
    if (anyEarlyPhase && anyLatePhase)
        return ErrorResponse.conflict("Last dropping of documents may have failed to clear all documents due " +
                                      "to concurrent topology changes, consider retrying");

    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (afterStart + withoutReport == contentNodes.size()) {
        // Everything is done (or the report is already cleaned up): report when documents were readied.
        if (lastReadiedAt != null)
            root.setLong("lastDropped", lastReadiedAt.toEpochMilli());
    }
    else {
        Cursor progress = root.setObject("progress");
        progress.setLong("total", contentNodes.size());
        progress.setLong("dropped", afterDrop);
        progress.setLong("started", afterStart + withoutReport);
    }
    return new SlimeJsonResponse(slime);
}
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e); default -> new 
ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), 
request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), 
Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), 
path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return 
ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return 
addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return 
suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), 
path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), 
path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
        // Deleting a pause resumes the job.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"),
path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Path variants with region before instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the HTTP methods this handler supports. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Lists all tenants with their applications (recursive root listing). */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications =
controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            // Render each tenant together with only that tenant's applications.
            toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Root listing: full recursive tenant dump when requested, otherwise a plain resource listing. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
               ? recursiveRoot(request)
               : new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants in brief form. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Renders a single tenant, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /** Renders the pending access request and audit log for a cloud tenant. */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var slime = new Slime();
        var cursor = slime.setObject();
        try {
            var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
            var managedAccess = accessControlService.getManagedAccess(tenant);
            cursor.setBool("managedAccess", managedAccess);
            accessRoleInformation.getPendingRequest()
                                 .ifPresent(membershipRequest -> {
                                     var requestCursor =
cursor.setObject("pendingRequest");
                                     requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                                     requestCursor.setString("reason", membershipRequest.getReason());
                                 });
            var auditLogCursor = cursor.setArray("auditLog");
            accessRoleInformation.getAuditLog()
                                 .forEach(auditLogEntry -> {
                                     var entryCursor = auditLogCursor.addObject();
                                     entryCursor.setString("created", auditLogEntry.getCreationTime());
                                     entryCursor.setString("approver", auditLogEntry.getApprover());
                                     entryCursor.setString("reason", auditLogEntry.getReason());
                                     entryCursor.setString("status", auditLogEntry.getAction());
                                 });
        } catch (ZmsClientException e) {
            // Missing role configuration is reported as unmanaged access rather than as an error.
            if (e.getErrorCode() == 404)
                cursor.setBool("managedAccess", false);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Operator-only: requests ssh access to a cloud tenant. */
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    /** Approves or rejects a pending ssh access request for a cloud tenant. */
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        // 'expiry' is epoch millis; defaults to 24 hours from now when absent.
        var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); try { controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } catch (ZmsClientException e) { if (e.getErrorCode() == 404) return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes"); throw e; } } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support 
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact)
                .withAddress(mergedAddress);
        var mergedInfo = info.withBilling(mergedBilling);
        // Persist under the tenant lock.
        controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }

    /** Renders the contacts list of a cloud tenant's info. */
    private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
        var slime = new Slime();
        var root = slime.setObject();
        toSlime(cloudTenant.info().contacts(), root);
        return new SlimeJsonResponse(slime);
    }

    /** Replaces the contacts list of a cloud tenant's info. */
    private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
        var mergedInfo = cloudTenant.info()
                .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts()));
        controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }

    /** Validates required fields of merged tenant info before it is stored. */
    private void validateMergedTenantInfo(TenantInfo mergedInfo) {
        if (mergedInfo.contact().name().isBlank()) {
            throw new IllegalArgumentException("'contactName' cannot be empty");
        }
        if (mergedInfo.contact().email().getEmailAddress().isBlank()) {
            throw new IllegalArgumentException("'contactEmail' cannot be empty");
        }
        if (! mergedInfo.contact().email().getEmailAddress().contains("@")) {
            throw new IllegalArgumentException("'contactEmail' needs to be an email address");
        }
        if (!
mergedInfo.website().isBlank()) {
            try {
                // URL parsing is used only as validation of the website field.
                new URL(mergedInfo.website());
            } catch (MalformedURLException e) {
                throw new IllegalArgumentException("'website' needs to be a valid address");
            }
        }
    }

    /** Serializes a tenant address to an "address" object; omitted entirely when empty. */
    private void toSlime(TenantAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.address());
        addressCursor.setString("postalCodeOrZip", address.code());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.region());
        addressCursor.setString("country", address.country());
    }

    /** Serializes a billing contact to a "billingContact" object; omitted entirely when empty. */
    private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("billingContact");
        addressCursor.setString("name", billingContact.contact().name());
        addressCursor.setString("email", billingContact.contact().email().getEmailAddress());
        addressCursor.setString("phone", billingContact.contact().phone());
        toSlime(billingContact.address(), addressCursor);
    }

    /** Serializes all contacts to a "contacts" array; only email contacts are supported. */
    private void toSlime(TenantContacts contacts, Cursor parentCursor) {
        Cursor contactsCursor = parentCursor.setArray("contacts");
        contacts.all().forEach(contact -> {
            Cursor contactCursor = contactsCursor.addObject();
            Cursor audiencesArray = contactCursor.setArray("audiences");
            contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
            switch (contact.type()) {
                case EMAIL:
                    var email = (TenantContacts.EmailContact) contact;
                    contactCursor.setString("email", email.email().getEmailAddress());
                    contactCursor.setBool("emailVerified", email.email().isVerified());
                    return;
                default:
                    throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
            }
        });
    }

    /** Parses a wire-format audience name; inverse of toAudience. */
    private static TenantContacts.Audience fromAudience(String value) {
        return switch (value) {
            case "tenant": yield TenantContacts.Audience.TENANT;
            case "notifications": yield
TenantContacts.Audience.NOTIFICATIONS;
            default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
        };
    }

    /** Serializes an audience to its wire-format name; inverse of fromAudience. */
    private static String toAudience(TenantContacts.Audience audience) {
        return switch (audience) {
            case TENANT: yield "tenant";
            case NOTIFICATIONS: yield "notifications";
        };
    }

    /** Updates the full tenant info of a cloud tenant, or 404 for other tenant types. */
    private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    // Returns the trimmed field value when present, otherwise the default; rejects values over 512 chars.
    private String getString(Inspector field, String defaultVale) {
        var string = field.valid() ? field.asString().trim() : defaultVale;
        if (string.length() > 512)
            throw new IllegalArgumentException("Input value too long");
        return string;
    }

    /** Merges the request body into the existing tenant info; a changed contact email triggers re-verification. */
    private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
        TenantInfo oldInfo = tenant.info();
        Inspector insp = toSlime(request.getData()).get();
        var mergedEmail = optional("contactEmail", insp)
                .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress()))
                .map(address -> {
                    controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT);
                    return new Email(address, false);
                })
                .orElse(oldInfo.contact().email());
        TenantContact mergedContact = TenantContact.empty()
                .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
                .withEmail(mergedEmail);
        TenantInfo mergedInfo = TenantInfo.empty()
                .withName(getString(insp.field("name"), oldInfo.name()))
                .withEmail(getString(insp.field("email"), oldInfo.email()))
                .withWebsite(getString(insp.field("website"), oldInfo.website()))
                .withContact(mergedContact)
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
                .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"),
tenant.name(), oldInfo.billingContact()))
                .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts()));
        validateMergedTenantInfo(mergedInfo);
        // Persist under the tenant lock.
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }

    /** Merges an address from the request over the old address; all fields must be set, or all blank. */
    private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
        if (!insp.valid()) return oldAddress;
        TenantAddress address = TenantAddress.empty()
                .withCountry(getString(insp.field("country"), oldAddress.country()))
                .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
                .withCity(getString(insp.field("city"), oldAddress.city()))
                .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
                .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
        List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region());
        if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
            return address;
        throw new IllegalArgumentException("All address fields must be set");
    }

    /** Merges a contact from the request over the old contact; non-billing email changes trigger re-verification. */
    private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) {
        if (!insp.valid()) return oldContact;
        var mergedEmail = optional("email", insp)
                .filter(address -> !address.equals(oldContact.email().getEmailAddress()))
                .map(address -> {
                    // Billing contact emails are stored as verified without a verification round-trip.
                    if (isBillingContact)
                        return new Email(address, true);
                    controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT);
                    return new Email(address, false);
                })
                .orElse(oldContact.email());
        return TenantContact.empty()
                .withName(getString(insp.field("name"), oldContact.name()))
                .withEmail(mergedEmail)
                .withPhone(getString(insp.field("phone"), oldContact.phone()));
    }

    private TenantBilling
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) {
        if (!insp.valid()) return oldContact;
        return TenantBilling.empty()
                .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    }

    /** Merges the contacts list from the request; new email addresses trigger notification-mail verification. */
    private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) {
        if (!insp.valid()) return oldContacts;
        List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
            String email = inspector.field("email").asString().trim();
            List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                    .map(audience -> fromAudience(audience.asString()))
                    .toList();
            // Keep the verified state of an already-known address; verify new ones.
            return oldContacts.ofType(TenantContacts.EmailContact.class)
                    .stream()
                    .filter(contact -> contact.email().getEmailAddress().equals(email))
                    .findAny()
                    .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email()))
                    .orElseGet(() -> {
                        controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS);
                        return new TenantContacts.EmailContact(audiences, new Email(email, false));
                    });
        }).toList();
        return new TenantContacts(contacts);
    }

    /** Lists notifications, optionally for one tenant, filtered by the request's query properties. */
    private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
        boolean productionOnly = showOnlyProductionInstances(request);
        boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
        Slime slime = new Slime();
        Cursor notificationsArray = slime.setObject().setArray("notifications");
        tenant.map(t -> Stream.of(TenantName.from(t)))
              .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
              .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
              .filter(notification -> propertyEquals(request,
"application", ApplicationName::from, notification.source().application())
                                      && propertyEquals(request, "instance", InstanceName::from, notification.source().instance())
                                      && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId())
                                      && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType())
                                      && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type()))
                                      && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
              .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
        return new SlimeJsonResponse(slime);
    }

    // True when the request property is absent, or when it is present and equal to the mapped value.
    private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
        return Optional.ofNullable(request.getProperty(property))
                       .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get()))
                       .orElse(true);
    }

    /** Serializes one notification; the tenant field and messages are optional in the output. */
    private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
        cursor.setLong("at", notification.at().toEpochMilli());
        cursor.setString("level", notificationLevelAsString(notification.level()));
        cursor.setString("type", notificationTypeAsString(notification.type()));
        if (!excludeMessages) {
            Cursor messagesArray = cursor.setArray("messages");
            notification.messages().forEach(messagesArray::addString);
        }
        if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
        notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
        notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
        notification.source().zoneId().ifPresent(zoneId -> {
            cursor.setString("environment", zoneId.environment().value());
            cursor.setString("region",
zoneId.region().value());
        });
        notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
        notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
        notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
    }

    // Wire-format name for a notification type; 'submission' and 'applicationPackage' share one name.
    private static String notificationTypeAsString(Notification.Type type) {
        return switch (type) {
            case submission, applicationPackage: yield "applicationPackage";
            case testPackage: yield "testPackage";
            case deployment: yield "deployment";
            case feedBlock: yield "feedBlock";
            case reindex: yield "reindex";
        };
    }

    private static String notificationLevelAsString(Notification.Level level) {
        return switch (level) {
            case info: yield "info";
            case warning: yield "warning";
            case error: yield "error";
        };
    }

    /** Lists applications for a tenant, or a single named application, with their instances. */
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        getTenantOrThrow(tenantName);
        List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant)
                : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                            .map(List::of)
                            .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
        Slime slime = new Slime();
        Cursor applicationArray = slime.setArray();
        for (Application application : applications) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the application package from the latest run of the given dev job, as a zip. */
    private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
        ZoneId zone = type.zone();
        RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
        byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
        return new ZipResponse(id.toFullString() + "."
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision)
                : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
        String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
        return new ZipResponse(filename, applicationPackage);
    }

    /** Returns the stored diff for the given submitted build, or 404. */
    private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
        TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
        return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
                .map(ByteArrayResponse::new)
                .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
    }

    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    /** Computes the compile version for an application, optionally pinned to a given major version. */
    private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
        Slime slime = new Slime();
        OptionalInt allowMajor = OptionalInt.empty();
        if (allowMajorParam != null) {
            try {
                allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
            }
        }
        Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
        slime.setObject().setString("compileVersion", compileVersion.toFullString());
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Slime slime = new
Slime();
        toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
                controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
        return new SlimeJsonResponse(slime);
    }

    /** Registers a developer key (PEM public key) for the requesting user on a cloud tenant. */
    private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Validates a configured secret store against a running deployment of the tenant. */
    private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
        var awsRegion = request.getProperty("aws-region");
        var parameterName = request.getProperty("parameter-name");
        var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
        if (!applicationId.tenant().equals(TenantName.from(tenantName)))
            return ErrorResponse.badRequest("Invalid application id");
        var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
        var deploymentId = new DeploymentId(applicationId, zoneId);
        var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
        var tenantSecretStore = tenant.tenantSecretStores()
                .stream()
                .filter(secretStore -> secretStore.getName().equals(secretStoreName))
                .findFirst();
        if (tenantSecretStore.isEmpty())
            return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
        var
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private void toSlime(Cursor keysArray, Map<PublicKey, ? 
extends Principal> keys) { keys.forEach((key, principal) -> { Cursor keyObject = keysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", principal.getName()); }); } private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withoutDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var awsId = mandatory("awsId", data).asString(); var externalId = mandatory("externalId", data).asString(); var role = mandatory("role", 
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); } private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); } private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + 
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); } private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); } private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? 
"empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); } private Application getApplication(String tenantName, String applicationName) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); return controller.applications().getApplication(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private Instance getInstance(String tenantName, String applicationName, String instanceName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); return controller.applications().getInstance(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id)); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString()); 
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor)); toSlime(node.resources(), nodeObject); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); nodeObject.setBool("down", node.down()); nodeObject.setBool("retired", node.retired() || node.wantToRetire()); nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration()); nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration()); nodeObject.setString("group", node.group()); nodeObject.setLong("index", node.index()); } return new SlimeJsonResponse(slime); } private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id); Slime slime = new Slime(); Cursor clustersObject = slime.setObject().setObject("clusters"); for (Cluster cluster : application.clusters().values()) { Cursor clusterObject = clustersObject.setObject(cluster.id().value()); clusterObject.setString("type", cluster.type().name()); toSlime(cluster.min(), clusterObject.setObject("min")); toSlime(cluster.max(), clusterObject.setObject("max")); if ( ! 
cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            toSlime(cluster.target(), clusterObject.setObject("target"));
            toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Maps a node state to its wire name; throws on states this API does not expose. */
    private static String valueOf(Node.State state) {
        return switch (state) {
            case failed -> "failed";
            case parked -> "parked";
            case dirty -> "dirty";
            case ready -> "ready";
            case active -> "active";
            case inactive -> "inactive";
            case reserved -> "reserved";
            case provisioned -> "provisioned";
            case breakfixed -> "breakfixed";
            case deprovisioned -> "deprovisioned";
            default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        };
    }

    /** Maps an orchestration state to its wire name; anything unrecognized is reported as "unknown". */
    static String valueOf(Node.ServiceState state) {
        return switch (state) {
            case expectedUp -> "expectedUp";
            case allowedDown -> "allowedDown";
            case permanentlyDown -> "permanentlyDown";
            case unorchestrated -> "unorchestrated";
            default -> "unknown";
        };
    }

    /** Maps a cluster type to its wire name; throws on the sentinel "unknown" type. */
    private static String valueOf(Node.ClusterType type) {
        return switch (type) {
            case admin -> "admin";
            case content -> "content";
            case container -> "container";
            case combined -> "combined";
            case unknown -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
        };
    }

    /** Maps a disk speed to its wire name. */
    private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
        return switch (diskSpeed) {
            case fast -> "fast";
            case slow -> "slow";
            case any -> "any";
        };
    }

    /** Maps a storage type to its wire name. */
    private static String valueOf(NodeResources.StorageType storageType) {
        return switch (storageType) {
            case remote -> "remote";
            case local -> "local";
            case any -> "any";
        };
    }

    private HttpResponse logs(String tenantName, String
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { try (logStream) { logStream.transferTo(outputStream); } } @Override public long maxPendingBytes() { return 1 << 26; } }; } private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant())); } private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); Instant now = controller.clock().instant(); SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now)); } private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new 
DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName()); controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant())); } private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment); return buildResponseFromSearchNodeMetrics(searchNodeMetrics); } private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { var from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.EPOCH); var until = Optional.ofNullable(request.getProperty("until")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.now(controller.clock())); var application = ApplicationId.from(tenantName, applicationName, instanceName); var zone = requireZone(environment, region); var deployment = new DeploymentId(application, zone); var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment); var slime = new Slime(); var root = slime.setObject(); for (var entry : events.entrySet()) { var serviceRoot = root.setArray(entry.getKey().clusterId().value()); 
scalingEventsToSlime(entry.getValue(), serviceRoot); } return new SlimeJsonResponse(slime); } private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) { try { var jsonObject = jsonMapper.createObjectNode(); var jsonArray = jsonMapper.createArrayNode(); for (SearchNodeMetrics metrics : searchnodeMetrics) { jsonArray.add(metrics.toJson()); } jsonObject.set("metrics", jsonArray); return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject)); } catch (JsonProcessingException e) { log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e); return new JsonResponse(500, ""); } } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); boolean upgradeRevision = ! requestObject.field("skipRevision").asBool(); boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool(); String triggered = reTrigger ? controller.applications().deploymentTrigger() .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform) .stream().map(job -> job.type().jobName()).collect(joining(", ")); String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") + (upgradeRevision ? "" : "revision") + ( ! upgradeRevision && ! upgradePlatform ? " and " : "") + (upgradePlatform ? "" : "platform") + ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : ""); return new MessageResponse(triggered.isEmpty() ? 
"Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); } private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) { var mail = mandatory("mail", inspector).asString(); var type = mandatory("mailType", inspector).asString(); var mailType = switch (type) { case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT; case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS; default -> throw new IllegalArgumentException("Unknown mail type " + type); }; var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType); return pendingVerification.isPresent() ? 
new MessageResponse("Re-sent verification mail to " + mail) : ErrorResponse.notFoundError("No pending mail verification found for " + mail); } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( ! 
status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && ! 
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } } private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); } private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! 
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && ! 
instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); addAvailabilityZone(deploymentObject, deployment.zone()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> ! 
instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region)); 
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
if (deployment == null)
    throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}

/** Serializes a pending change: its platform version and/or application revision, when present. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision)));
}

/** Serializes one endpoint: cluster, tls, url, scope, routing method and legacy flag. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}

/**
 * Serializes a single deployment: identity, endpoints, links, versions, deployment status,
 * enclave info, quota, archive URI, activity and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    addAvailabilityZone(response, deployment.zone());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        // Hide legacy and direct endpoints unless explicitly requested
        zoneEndpoints = zoneEndpoints.not().legacy().direct();
    }
    for (var endpoint : zoneEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                               .targets(deploymentId);
    if (!legacyEndpoints) {
        declaredEndpoints = declaredEndpoints.not().legacy().direct();
    }
    for (var endpoint : declaredEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    // Links to related resources for this deployment
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", application.revisions().get(deployment.revision()).stringId());
    response.setLong("build", deployment.revision().number());
    Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
    response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

    controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
        Cursor enclave = response.setObject("enclave");
        enclave.setString("cloudAccount", cloudAccount.value());
        controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> enclave.setString("athensDomain", domain.value()));
    });

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        if (!deployment.zone().environment().isManuallyDeployed()) {
            DeploymentStatus status = controller.jobController().deploymentStatus(application);
            JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
            Optional.ofNullable(status.jobSteps().get(jobId))
                    .ifPresent(stepStatus -> {
                        JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                        if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                            response.setString("status", "complete");
                        else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant()))
                            response.setString("status", "pending");
                        else
                            response.setString("status", "running");
                    });
        } else {
            // Manually deployed zones have no job steps; report the last deployment run instead
            var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
            deploymentRun.ifPresent(run -> {
                response.setString("status", run.hasEnded() ? "complete" : "running");
            });
        }
    }
    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));

    // Enclave deployments archive per cloud account, all others per tenant
    (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ?
     controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) :
     controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
            .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}

/** Serializes rotation state as a 'bcpStatus' object. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}

/** Serializes the status of each assigned global rotation for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var array = object.setArray("endpointStatus");
    for (var rotation : rotations) {
        var statusObject = array.addObject();
        var targets = status.of(rotation.rotationId());
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}

/** Returns the monitoring dashboard URI for the given deployment. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}

/** Sets a deployment in or out of global rotation, recording whether an operator or the tenant made the change. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(deploymentId).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ?
"in" : "out of"));
}

/** Returns the private-link service type name for the cloud hosting the given zone. */
private String serviceTypeIn(DeploymentId id) {
    CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName();
    if (CloudName.AWS.equals(cloud)) return "aws-private-link";
    if (CloudName.GCP.equals(cloud)) return "gcp-service-connect";
    return "unknown";
}

/** Lists private endpoint services, their allowed URNs and endpoint connections for each load balancer of a deployment. */
private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region));
    List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId());
    Slime slime = new Slime();
    Cursor lbArray = slime.setObject().setArray("privateServices");
    for (LoadBalancer lb : lbs) {
        Cursor serviceObject = lbArray.addObject();
        serviceObject.setString("cluster", lb.cluster().value());
        lb.service().ifPresent(service -> {
            serviceObject.setString("serviceId", service.id());
            serviceObject.setString("type", serviceTypeIn(id));
            Cursor urnsArray = serviceObject.setArray("allowedUrns");
            for (AllowedUrn urn : service.allowedUrns()) {
                Cursor urnObject = urnsArray.addObject();
                urnObject.setString("type", switch (urn.type()) {
                    case awsPrivateLink -> "aws-private-link";
                    case gcpServiceConnect -> "gcp-service-connect";
                });
                urnObject.setString("urn", urn.urn());
            }
            Cursor endpointsArray = serviceObject.setArray("endpoints");
            controller.serviceRegistry().vpcEndpointService()
                      .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount())
                      .forEach(endpoint -> {
                          Cursor endpointObject = endpointsArray.addObject();
                          endpointObject.setString("endpointId", endpoint.endpointId());
                          endpointObject.setString("state", endpoint.stateValue().name());
                          endpointObject.setString("detail", endpoint.stateString());
                      });
        });
    }
    return new SlimeJsonResponse(slime);
}

/** Triggers dropping of all documents in a manually deployed zone, optionally limited to one cluster. */
private HttpResponse dropDocuments(String tenant, String application, String instance, String environment, String region, Optional<ClusterSpec.Id> clusterId) {
    ZoneId zone = ZoneId.from(environment, region);
    if (!zone.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments");
    ApplicationId applicationId = ApplicationId.from(tenant, application, instance);
    controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId);
    return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() + clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone);
}

/** Returns the global rotation override status for the primary rotation endpoint of a deployment, if it has one. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                   .requiresRotation()
                                                   .primary();
    if (primaryEndpoint.isPresent()) {
        DeploymentRoutingContext context = controller.routing().of(deploymentId);
        RoutingStatus status = context.routingStatus();
        array.addString(primaryEndpoint.get().upstreamName(deploymentId));
        Cursor statusObject = array.addObject();
        statusObject.setString("status", status.value().name());
        statusObject.setString("reason", "");
        statusObject.setString("agent", status.agent().name());
        statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns rotation status for one rotation of an instance's deployment in a zone. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance =
controller.applications().requireInstance(applicationId);
ZoneId zone = requireZone(environment, region);
RotationId rotation = findRotationId(instance, endpointId);
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) {
    throw new NotExistsException(instance + " has no deployment in " + zone);
}
Slime slime = new Slime();
Cursor response = slime.setObject();
toSlime(instance.rotationStatus().of(rotation, deployment), response);
return new SlimeJsonResponse(slime);
}

/** Returns the currently deploying change of an instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! instance.change().isEmpty()) {
        instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
        // "pinned" mirrors "platform-pinned"; presumably kept for older clients — TODO confirm before removing
        root.setBool("pinned", instance.change().isPlatformPinned());
        root.setBool("platform-pinned", instance.change().isPlatformPinned());
        root.setBool("application-pinned", instance.change().isRevisionPinned());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}

/** Proxies a /status page request to the given service on the given host. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters()));
}

/** Returns orchestrator service node info for the given deployment. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
}

/** Proxies a /state/v1 request to the given service on the given host. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    // Tell the proxied service where the original request came from
    query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}

/** Returns application package content for the given deployment. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}

/** Updates an existing tenant from the request body. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
getTenantOrThrow(tenantName);
TenantName tenant = TenantName.from(tenantName);
Inspector requestObject = toSlime(request.getData()).get();
controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                            accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}

/** Creates a new tenant from the request body; in public systems the creating user becomes the tenant contact. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), new Email(user.email(), true)));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(info);
            controller.tenants().store(lockedTenant);
        });
    }
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}

/** Creates an application under the given tenant, authorized by the request credentials. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    Application application = controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Creates an instance of an application, creating the application first if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    controller.applications().createInstance(applicationId.instance(instanceName));
    Slime slime = new Slime();
    toSlime(applicationId.instance(instanceName), slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        // Operators may force any version; everyone else is limited to versions active in this system
        if ( ! versionStatus.isActive(version) && ! isOperator(request))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPlatformPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1; // -1 means "latest known build"
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                          : getRevision(application.get(), build);
        Change change = Change.of(revision);
        if (pin)
            change = change.withRevisionPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Returns the production revision with the given build number, which must also still have its package stored. */
private RevisionId getRevision(Application application, long build) {
    return application.revisions().withPackage().stream()
                      .map(ApplicationVersion::id)
                      .filter(version -> version.number() == build)
                      .findFirst()
                      .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build))
                      .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}

/** Marks the given build as non-deployable and cancels any instance change targeting it. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        for (Instance instance : application.get().instances().values())
            if (instance.change().revision().equals(Optional.of(revision)))
controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! 
type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName()); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone. */ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = 
readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); } void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); status.cause().ifPresent(cause -> statusObject.setString("cause", cause)); } private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; } /** Enables reindexing of an application in a zone. */ private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); } /** Disables reindexing of an application in a zone. 
*/
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    RestartFilter restartFilter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, restartFilter);
    return new MessageResponse("Requested restart of " + deploymentId);
}

/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
}

/** Deploys an application package directly to the given job's zone; non-operators may only target manual environments. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    // Use the same key constant for the presence check and the lookup (EnvironmentResource.APPLICATION_ZIP == "applicationZip")
    if ( ! dataParts.containsKey(EnvironmentResource.APPLICATION_ZIP))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                    Optional.of(id.instance()),
                                                                    Optional.of(type.zone()),
                                                                    applicationPackage,
                                                                    Optional.of(requireUserPrincipal(request)));

    // Parse the optional deployOptions part once, then pick out the fields we need
    Optional<Inspector> deployOptions = Optional.ofNullable(dataParts.get("deployOptions"))
                                                .map(json -> SlimeUtils.jsonToSlime(json).get());
    Optional<Version> version = deployOptions.flatMap(options -> optional("vespaVersion", options))
                                             .map(Version::fromString);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);

    boolean dryRun = deployOptions.flatMap(options -> optional("dryRun", options))
                                  .map(Boolean::valueOf)
                                  .orElse(false);

    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}

/** Deploys a system application package to the given zone; rejected while the system is upgrading. */
private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( !
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if ( ! vespaVersion.isEmpty()) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } DeploymentResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber()); Cursor logArray = root.setArray("prepareMessages"); for (LogEntry logMessage : result.log()) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.epochMillis()); logObject.setString("level", logMessage.level().getName()); logObject.setString("message", logMessage.message()); } return new SlimeJsonResponse(slime); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && ! 
isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! 
run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! 
type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime 
dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final 
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
/**
 * Handler for the /application/v4 REST API. Each request is dispatched on its HTTP
 * method and path pattern to a dedicated private handler method, and thrown
 * exceptions are translated into the corresponding HTTP error responses.
 */
class ApplicationApiHandler extends AuditLoggingRequestHandler {

    // Shared JSON mapper for request/response bodies.
    private static final ObjectMapper jsonMapper = new ObjectMapper();

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    @Override
    public Duration getTimeout() {
        // Some operations behind this API (e.g. deployments) are slow; allow a generous timeout.
        return Duration.ofMinutes(20);
    }

    /** Dispatches on the HTTP method, mapping known exception types to HTTP error responses. */
    @Override
    public HttpResponse auditAndHandle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri());
            return switch (request.getMethod()) {
                case GET: yield handleGET(path, request);
                case PUT: yield handlePUT(path, request);
                case POST: yield handlePOST(path, request);
                case PATCH: yield handlePATCH(path, request);
                case DELETE: yield handleDELETE(path, request);
                case OPTIONS: yield handleOPTIONS();
                default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            };
        }
        catch (RestApiException.Forbidden e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (RestApiException.Unauthorized e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Map config-server error codes onto appropriate HTTP statuses.
            return switch (e.code()) {
                case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
                default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
            };
        }
        catch (RuntimeException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    /** Routing table for GET: first matching path pattern wins. */
    private HttpResponse handleGET(Path path, HttpRequest request) {
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"),
                path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/private-services")) return getPrivateServiceInfo(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocumentsStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // The routes below use the alternative .../environment/.../region/.../instance/... path order.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics/searchnode")) return searchNodeMetrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // NOTE(review): the following match is byte-identical to the one above and can never
        // be reached; it looks like an accidental duplicate. Left unchanged in this
        // documentation-only pass.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routing table for PUT: first matching path pattern wins. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/info/resend-mail-verification")) return withCloudTenant(path.get("tenant"), request, this::resendEmailVerification);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"),
                path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routing table for POST: first matching path pattern wins. */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        // NOTE(review): "pin" routes to the same call as "platform-pin" below — presumably a
        // legacy alias; confirm before removing either.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform-pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application-pin")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/drop-documents")) return dropDocuments(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploySystemApplication(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"),
                path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routing table for PATCH: both application- and instance-level paths patch the application. */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routing table for DELETE: first matching path pattern wins. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the methods this handler supports. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Lists all tenants with their applications inlined (the recursive form of the API root). */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications =
                controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).toList(), request);
        return new SlimeJsonResponse(slime);
    }

    /** API root: recursive tenant listing when requested, otherwise just links. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request) ? recursiveRoot(request) : new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants in short form. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Returns the named tenant, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                .map(tenant -> tenant(tenant, request))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Serializes a tenant together with its applications. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns operator access-request state for a cloud tenant: managed-access flag,
     * any pending membership request, and the audit log. A 404 from ZMS is treated
     * as "no managed access configured" rather than an error.
     */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var slime = new Slime();
        var cursor = slime.setObject();
        try {
            var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
            var managedAccess = accessControlService.getManagedAccess(tenant);
            cursor.setBool("managedAccess", managedAccess);
            accessRoleInformation.getPendingRequest()
                    .ifPresent(membershipRequest -> {
                        var requestCursor = cursor.setObject("pendingRequest");
                        requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                        requestCursor.setString("reason", membershipRequest.getReason());
                    });
            var auditLogCursor = cursor.setArray("auditLog");
            accessRoleInformation.getAuditLog()
                    .forEach(auditLogEntry -> {
                        var entryCursor = auditLogCursor.addObject();
                        entryCursor.setString("created", auditLogEntry.getCreationTime());
                        entryCursor.setString("approver", auditLogEntry.getApprover());
                        entryCursor.setString("reason", auditLogEntry.getReason());
                        entryCursor.setString("status", auditLogEntry.getAction());
                    });
        } catch (ZmsClientException e) {
            if (e.getErrorCode() == 404) cursor.setBool("managedAccess", false);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Requests SSH access to a cloud tenant; only operators may do this. */
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    /**
     * Approves or rejects a pending SSH access request for a cloud tenant.
     * The request body may carry "expiry" (epoch millis; defaults to 24 hours from now)
     * and "approve" (boolean).
     */
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        var expiry = inspector.field("expiry").valid() ? Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS);
        var approve = inspector.field("approve").asBool();
        controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
        return new MessageResponse("OK");
    }

    private HttpResponse addManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, true);
    }

    private HttpResponse removeManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, false);
    }

    /**
     * Enables or disables managed access for a cloud tenant. A 404 from ZMS means
     * the backing configuration is not ready yet, reported as a conflict so the
     * caller retries.
     */
    private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            // NOTE(review): "privel" in the message below looks like a typo for "privileges";
            // left unchanged in this documentation-only pass since it is a user-facing string.
            return ErrorResponse.badRequest("Can only set access privel for cloud tenants");
        try {
            controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
            var slime = new Slime();
            slime.setObject().setBool("managedAccess", managedAccess);
            return new SlimeJsonResponse(slime);
        } catch (ZmsClientException e) {
            if (e.getErrorCode() == 404)
                return ErrorResponse.conflict("Configuration not yet ready, please try again in a few minutes");
            throw e;
        }
    }

    /** Returns the stored tenant info for a cloud tenant, or 404 for other tenant types. */
    private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Applies the given handler to the named tenant if it is a cloud tenant; 404 otherwise. */
    private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
        return controller.tenants().get(TenantName.from(tenantName))
                .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                .map(tenant -> handler.apply((CloudTenant) tenant))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support
this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email().getEmailAddress()); infoCursor.setBool("contactEmailVerified", info.contact().email().isVerified()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email().getEmailAddress()); contact.setBool("emailVerified", info.contact().email().isVerified()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedEmail = optional("email", inspector.field("contact")) .filter(address -> !address.equals(info.contact().email().getEmailAddress())) .map(address -> { 
controller.mailVerifier().sendMailVerification(cloudTenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(info.contact().email()); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(mergedEmail); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("company"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) .withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email().getEmailAddress()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), cloudTenant.name(), contact, false); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() 
.withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() .withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.name(), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().getEmailAddress().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().getEmailAddress().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! 
mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email().getEmailAddress()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email().getEmailAddress()); contactCursor.setBool("emailVerified", email.email().isVerified()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield 
TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { var string = field.valid() ? field.asString().trim() : defaultVale; if (string.length() > 512) throw new IllegalArgumentException("Input value too long"); return string; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); var mergedEmail = optional("contactEmail", insp) .filter(address -> !address.equals(oldInfo.contact().email().getEmailAddress())) .map(address -> { controller.mailVerifier().sendMailVerification(tenant.name(), address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldInfo.contact().email()); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(mergedEmail); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), 
tenant.name(), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), tenant.name(), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantName tenantName, TenantContact oldContact, boolean isBillingContact) { if (!insp.valid()) return oldContact; var mergedEmail = optional("email", insp) .filter(address -> !address.equals(oldContact.email().getEmailAddress())) .map(address -> { if (isBillingContact) return new Email(address, true); controller.mailVerifier().sendMailVerification(tenantName, address, PendingMailVerification.MailType.TENANT_CONTACT); return new Email(address, false); }) .orElse(oldContact.email()); return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(mergedEmail) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling 
updateTenantInfoBillingContact(Inspector insp, TenantName tenantName, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, tenantName, oldContact.contact(), true)) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantName tenantName, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); return oldContacts.ofType(TenantContacts.EmailContact.class) .stream() .filter(contact -> contact.email().getEmailAddress().equals(email)) .findAny() .map(emailContact -> new TenantContacts.EmailContact(audiences, emailContact.email())) .orElseGet(() -> { controller.mailVerifier().sendMailVerification(tenantName, email, PendingMailVerification.MailType.NOTIFICATIONS); return new TenantContacts.EmailContact(audiences, new Email(email, false)); }); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, 
"application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", 
zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var 
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private void toSlime(Cursor keysArray, Map<PublicKey, ? 
// (continued from previous chunk) Serializes each public-key -> principal entry into keysArray.
extends Principal> keys) {
    keys.forEach((key, principal) -> {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(key));
        keyObject.setString("user", principal.getName());
    });
}

/** Adds a PEM-encoded deploy key to the application under lock and returns the resulting key set as JSON. */
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

/** Removes a PEM-encoded deploy key from the application under lock and returns the remaining key set as JSON. */
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

/**
 * Registers a new tenant secret store (cloud tenants only): validates the store, creates the
 * backing tenant policy and secret-service entry, then persists it on the locked tenant.
 * Returns the tenant's full secret-store list after the update.
 */
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role", data).asString();

    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }

    // External side effects happen before the tenant record is updated under lock.
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    // Re-read to serialize the stored state, not the pre-lock snapshot.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

/**
 * Deletes a named tenant secret store: removes the secret-service entry and tenant policy,
 * then removes it from the locked tenant. Returns the remaining secret stores.
 */
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();
    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");

    var tenantSecretStore = optionalSecretStore.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });

    // Re-read to serialize the stored state after removal.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

/** Sets the AWS archive access role for a cloud tenant. */
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var role = mandatory("role", data).asString();
    if (role.isBlank()) {
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}

/** Clears the AWS archive access role for a cloud tenant. */
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}

/** Sets the GCP archive access member for a cloud tenant. */
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    var data = toSlime(request.getData()).get();
    var member = mandatory("member", data).asString();
    if (member.isBlank()) {
        return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
}

/** Clears the GCP archive access member for a cloud tenant. */
private HttpResponse removeGcpArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
}

/**
 * Patches selected application-level fields (majorVersion, pemDeployKey) under lock.
 * A majorVersion of 0 clears the pinned major version.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // 0 is the sentinel for "unset".
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}

/** Looks up an application, throwing NotExistsException if absent. */
private Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Looks up an instance, throwing NotExistsException if absent. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Lists the node-repository nodes of a deployment, serialized one JSON object per node. */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
        toSlime(node.resources(), nodeObject);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
        nodeObject.setBool("down", node.down());
        // "retired" covers both already-retired and retirement-requested nodes.
        nodeObject.setBool("retired", node.retired() || node.wantToRetire());
        nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
        nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
        nodeObject.setString("group", node.group());
        nodeObject.setLong("index", node.index());
    }
    return new SlimeJsonResponse(slime);
}

/** Serializes the autoscaling state (min/max/current/target/suggested, scaling events) of each cluster in a deployment. */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application =
            controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        clusterObject.setString("type", cluster.type().name());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        if ( ! cluster.groupSize().isEmpty()) toSlime(cluster.groupSize(), clusterObject.setObject("groupSize"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        toSlime(cluster.target(), clusterObject.setObject("target"));
        toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
    }
    return new SlimeJsonResponse(slime);
}

/** Maps a node state to its wire name; throws on unexpected values so new enum constants fail loudly. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed: yield "failed";
        case parked: yield "parked";
        case dirty: yield "dirty";
        case ready: yield "ready";
        case active: yield "active";
        case inactive: yield "inactive";
        case reserved: yield "reserved";
        case provisioned: yield "provisioned";
        case breakfixed: yield "breakfixed";
        case deprovisioned: yield "deprovisioned";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}

/** Maps an orchestration state to its wire name; unknown maps to "unknown" rather than throwing. */
static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: return "expectedUp";
        case allowedDown: return "allowedDown";
        case permanentlyDown: return "permanentlyDown";
        case unorchestrated: return "unorchestrated";
        case unknown: break;
    }
    return "unknown";
}

/** Maps a cluster type to its wire name; unknown is rejected. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin: yield "admin";
        case content: yield "content";
        case container: yield "container";
        case combined: yield "combined";
        case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}

/** Maps a disk speed to its wire name. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    return switch (diskSpeed) {
        case fast : yield "fast";
        case slow : yield "slow";
        case any : yield "any";
    };
}

/** Maps a storage type to its wire name. */
private static String valueOf(NodeResources.StorageType storageType) {
    return switch (storageType) {
        case remote : yield "remote";
        case local : yield "local";
        case any : yield "any";
    };
}

// Streams deployment logs from the config server back to the client. (continues in next chunk)
private HttpResponse logs(String tenantName, String
// (continuation of logs(...) whose signature starts on the previous line)
applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    // Stream the log body through without buffering it all; try-with-resources closes the upstream stream.
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            try (logStream) {
                logStream.transferTo(outputStream);
            }
        }

        @Override
        public long maxPendingBytes() {
            return 1 << 26; // 64 MiB of buffered, unsent data allowed
        }
    };
}

/** Returns the current support-access state for a deployment. */
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}

/** Grants support access to a deployment for 7 days, attributed to the requesting user. */
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    Instant now = controller.clock().instant();
    SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}

/** Revokes support access for a deployment and re-triggers it so the revocation takes effect. */
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Principal principal = requireUserPrincipal(request);
    SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
    controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
    return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}

/** Fetches search-node (proton) metrics for a deployment from the config server. */
private HttpResponse searchNodeMetrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    List<SearchNodeMetrics> searchNodeMetrics = controller.serviceRegistry().configServer().getSearchNodeMetrics(deployment);
    return buildResponseFromSearchNodeMetrics(searchNodeMetrics);
}

/**
 * Returns scaling events per cluster for a deployment, within an optional [from, until] window
 * given as epoch seconds; defaults to [EPOCH, now].
 */
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    var from = Optional.ofNullable(request.getProperty("from"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.EPOCH);
    var until = Optional.ofNullable(request.getProperty("until"))
            .map(Long::valueOf)
            .map(Instant::ofEpochSecond)
            .orElse(Instant.now(controller.clock()));

    var application = ApplicationId.from(tenantName, applicationName, instanceName);
    var zone = requireZone(environment, region);
    var deployment = new DeploymentId(application, zone);
    var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
    var slime = new Slime();
    var root = slime.setObject();
    for (var entry : events.entrySet()) {
        var serviceRoot = root.setArray(entry.getKey().clusterId().value());
        scalingEventsToSlime(entry.getValue(), serviceRoot);
    }
    return new SlimeJsonResponse(slime);
}

/** Wraps search-node metrics in a {"metrics": [...]} JSON response; 500 with empty body on serialization failure. */
private JsonResponse buildResponseFromSearchNodeMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
    try {
        var jsonObject = jsonMapper.createObjectNode();
        var jsonArray = jsonMapper.createArrayNode();
        for (SearchNodeMetrics metrics : searchnodeMetrics) {
            jsonArray.add(metrics.toJson());
        }
        jsonObject.set("metrics", jsonArray);
        return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
    } catch (JsonProcessingException e) {
        log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
        return new JsonResponse(500, "");
    }
}

/**
 * Triggers (or re-triggers) a job, honoring skipTests/skipRevision/skipUpgrade flags from the
 * request body, and reports which jobs were triggered and which upgrades were suppressed.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
    boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
    String triggered = reTrigger
            ? controller.applications().deploymentTrigger()
                        .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName()
            : controller.applications().deploymentTrigger()
                        .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                        .stream().map(job -> job.type().jobName()).collect(joining(", "));

    // Builds e.g. ", without revision and platform upgrade" depending on which flags were set.
    String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                (upgradeRevision ? "" : "revision") +
                                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                (upgradePlatform ? "" : "platform") +
                                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
    return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                   : "Triggered " + triggered + " for " + id + suppressedUpgrades);
}

/** Pauses a job for the maximum allowed pause duration. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}

/** Resumes a previously paused job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}

/** Re-sends a pending verification mail of the given type ("contact" or "notifications") to the given address. */
private SlimeJsonResponse resendEmailVerification(CloudTenant tenant, Inspector inspector) {
    var mail = mandatory("mail", inspector).asString();
    var type = mandatory("mailType", inspector).asString();
    var mailType = switch (type) {
        case "contact" -> PendingMailVerification.MailType.TENANT_CONTACT;
        case "notifications" -> PendingMailVerification.MailType.NOTIFICATIONS;
        default -> throw new IllegalArgumentException("Unknown mail type " + type);
    };
    var pendingVerification = controller.mailVerifier().resendMailVerification(tenant.name(), mail, mailType);
    return pendingVerification.isPresent() ? new MessageResponse("Re-sent verification mail to " + mail)
                                           : ErrorResponse.notFoundError("No pending mail verification found for " + mail);
}

/** Serializes an application overview (deploy status, keys, metrics, activity, ownership) for the application API. */
private void toSlime(Cursor object, Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/", request.getUri()).toString());

    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));

    // Top-level change info comes from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
    });

    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

/** Serializes one instance (change, change blockers, rotation, deployments) as part of an application overview. */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus appears unused below — looks like a leftover; confirm before removing.
        Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), status.application());
        if ( !
// (continuation of the instance-serializing toSlime whose opening is on earlier lines)
instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change(), application);
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);

        // One object per configured change blocker, with its blocked kinds and time window.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

    addRotationId(object, instance);

    // Deployments in spec order when a spec exists; insertion order otherwise.
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
            .map(spec -> sortedDeployments(instance.deployments().values(), spec))
            .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();

        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }

        if (recurseOverDeployments(request)) // List full deployment information when recursive.
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            addAvailabilityZone(deploymentObject, deployment.zone());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }

    // Also list zones a job targets (or is actively deploying to) but which have no deployment yet.
    Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                  controller.jobController().active(instance.id()).stream()
                            .map(run -> run.id().job())
                            .filter(job -> job.type().environment().isManuallyDeployed()))
          .map(job -> job.type().zone())
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });

    // Legacy single-key field plus the full key list.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

/** Returns full information about a single deployment, or 404 if the instance or deployment does not exist. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));

    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
    Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());

    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}

/** Serializes a pending change: platform version and/or application revision. */
private void toSlime(Cursor object, Change change, Application application) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"),
                                                                                  application.revisions().get(revision)));
}

/** Serializes a single endpoint (cluster, tls, url, scope, routing method, legacy flag). */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
    object.setBool("legacy", endpoint.legacy());
}

/**
 * Serializes full deployment details: identity, endpoints, version/revision, timestamps,
 * enclave info, rotation status, job status, quota/cost, archive URI, activity and metrics.
 */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    addAvailabilityZone(response, deployment.zone());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));

    boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone);
    if (!legacyEndpoints) {
        zoneEndpoints = zoneEndpoints.not().legacy().direct();
    }
    for (var endpoint : zoneEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application)
                                               .targets(deploymentId);
    if (!legacyEndpoints) {
        declaredEndpoints = declaredEndpoints.not().legacy().direct();
    }
    for (var endpoint : declaredEndpoints) {
        toSlime(endpoint, endpointArray.addObject());
    }

    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/",
                                                 "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(),
                                                 request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", application.revisions().get(deployment.revision()).stringId());
    response.setLong("build", deployment.revision().number());
    Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment);
    response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli()));
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));

    controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
        Cursor enclave = response.setObject("enclave");
        enclave.setString("cloudAccount", cloudAccount.value());
        // NOTE: "athensDomain" (sic) is the established wire-format key — do not "fix" the spelling.
        controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> enclave.setString("athensDomain", domain.value()));
    });

    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);

        if (!deployment.zone().environment().isManuallyDeployed()) {
            // Status for spec-driven deployments is derived from the deployment job's readiness.
            DeploymentStatus status = controller.jobController().deploymentStatus(application);
            JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone()));
            Optional.ofNullable(status.jobSteps().get(jobId))
                    .ifPresent(stepStatus -> {
                        JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision()));
                        if ( ! status.jobsToRun().containsKey(stepStatus.job().get()))
                            response.setString("status", "complete");
                        else if ( ! stepStatus.readiness(instance.change()).okAt(controller.clock().instant()))
                            response.setString("status", "pending");
                        else
                            response.setString("status", "running");
                    });
        } else {
            // Manually deployed zones: status follows the last deployment run.
            var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId()));
            deploymentRun.ifPresent(run -> {
                response.setString("status", run.hasEnded() ? "complete" : "running");
            });
        }
    }

    response.setDouble("quota", deployment.quota().rate());
    deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));

    // Enclave deployments use the per-cloud-account archive bucket, others the per-tenant one.
    (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ?
            controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) :
            controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
            .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));

    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}

/** Serializes a rotation state into a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}

/** Serializes per-rotation endpoint status (id, cluster, state, last update) for a deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var array = object.setArray("endpointStatus");
    for (var rotation : rotations) {
        var statusObject = array.addObject();
        var targets = status.of(rotation.rotationId());

        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}

/** Returns the monitoring-system URI for a deployment. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}

/**
 * Sets a deployment's global rotation in or out of service, attributed to operator or tenant.
 * (Definition continues past this chunk.)
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(deploymentId).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                           instance.id().toShortString(), zone, inService ?
"in" : "out of")); } private String serviceTypeIn(DeploymentId id) { CloudName cloud = controller.zoneRegistry().zones().all().get(id.zoneId()).get().getCloudName(); if (CloudName.AWS.equals(cloud)) return "aws-private-link"; if (CloudName.GCP.equals(cloud)) return "gcp-service-connect"; return "unknown"; } private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(id.applicationId(), id.zoneId()); Slime slime = new Slime(); Cursor lbArray = slime.setObject().setArray("privateServices"); for (LoadBalancer lb : lbs) { Cursor serviceObject = lbArray.addObject(); serviceObject.setString("cluster", lb.cluster().value()); lb.service().ifPresent(service -> { serviceObject.setString("serviceId", service.id()); serviceObject.setString("type", serviceTypeIn(id)); Cursor urnsArray = serviceObject.setArray("allowedUrns"); for (AllowedUrn urn : service.allowedUrns()) { Cursor urnObject = urnsArray.addObject(); urnObject.setString("type", switch (urn.type()) { case awsPrivateLink -> "aws-private-link"; case gcpServiceConnect -> "gcp-service-connect"; }); urnObject.setString("urn", urn.urn()); } Cursor endpointsArray = serviceObject.setArray("endpoints"); controller.serviceRegistry().vpcEndpointService() .getConnections(new ClusterId(id, lb.cluster()), lb.cloudAccount()) .forEach(endpoint -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("endpointId", endpoint.endpointId()); endpointObject.setString("state", endpoint.stateValue().name()); endpointObject.setString("detail", endpoint.stateString()); }); }); } return new SlimeJsonResponse(slime); } private HttpResponse dropDocuments(String tenant, String application, String instance, String 
environment, String region, Optional<ClusterSpec.Id> clusterId) { ZoneId zone = ZoneId.from(environment, region); if (!zone.environment().isManuallyDeployed()) throw new IllegalArgumentException("Drop documents status is only available for manually deployed environments"); ApplicationId applicationId = ApplicationId.from(tenant, application, instance); controller.serviceRegistry().configServer().nodeRepository().dropDocuments(zone, applicationId, clusterId); return new MessageResponse("Triggered drop documents for " + applicationId.toFullString() + clusterId.map(id -> " and cluster " + id).orElse("") + " in " + zone); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = 
controller.applications().requireInstance(applicationId); // (continues rotationStatus, whose signature is on the previous line)
        ZoneId zone = requireZone(environment, region);
        RotationId rotation = findRotationId(instance, endpointId);
        Deployment deployment = instance.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(instance + " has no deployment in " + zone);
        }
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(instance.rotationStatus().of(rotation, deployment), response);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the change currently rolling out (platform version and/or application revision, plus pin flags) for the given instance. */
    private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        if ( ! instance.change().isEmpty()) {
            instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
            instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
            root.setBool("pinned", instance.change().isPlatformPinned()); // Same value under two keys — "pinned" looks like a legacy alias of "platform-pinned"; confirm with clients before removing
            root.setBool("platform-pinned", instance.change().isPlatformPinned());
            root.setBool("application-pinned", instance.change().isRevisionPinned());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns whether orchestration of the given deployment is currently suspended. */
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment,
                                   String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        boolean suspended = controller.applications().isSuspended(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setBool("suspended", suspended);
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment,
                                String region, String serviceName, String host, HttpURL.Path restPath,
HttpRequest request) { // (completes the signature of status, started on the previous line)
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host),
                                                                              HttpURL.Path.parse("/status").append(restPath),
                                                                              Query.empty().add(request.getJDiscRequest().parameters()));
    }

    /** Proxies the service node listing for the given deployment from its config server. */
    private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
    }

    /** Proxies a /state/v1 request for a service on the given host, adding the original request URI as the "forwarded-url" query parameter. */
    private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region,
                                 String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        Query query = Query.empty().add(request.getJDiscRequest().parameters());
        query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
        return controller.serviceRegistry().configServer().getServiceNodePage(
                deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
    }

    /** Fetches application-package content at the given path for the given deployment from the config server. */
    private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region,
                                 HttpURL.Path restPath, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
    }

    private HttpResponse updateTenant(String tenantName, HttpRequest request) {
getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), new Email(user.email(), true))); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId 
applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { String versionString = readToString(request.getData()); ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Version version = Version.fromString(versionString); VersionStatus versionStatus = controller.readVersionStatus(); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus); if ( ! versionStatus.isActive(version) && ! isOperator(request)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + versionStatus.versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPlatformPin(); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. 
*/ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Inspector buildField = toSlime(request.getData()).get().field("build"); long build = buildField.valid() ? buildField.asLong() : -1; StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ? application.get().revisions().last().get().id() : getRevision(application.get(), build); Change change = Change.of(revision); if (pin) change = change.withRevisionPin(); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } private RevisionId getRevision(Application application, long build) { return application.revisions().withPackage().stream() .map(ApplicationVersion::id) .filter(version -> version.number() == build) .findFirst() .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build)) .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); } private HttpResponse cancelBuild(String tenantName, String applicationName, String build){ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); RevisionId revision = RevisionId.forProduction(Long.parseLong(build)); controller.applications().lockApplicationOrThrow(id, application -> { controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))); for (Instance instance : application.get().instances().values()) if (instance.change().revision().equals(Optional.of(revision))) 
controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.replaceAll("-", "_").toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! 
type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed, "reindexing triggered by " + requireUserPrincipal(request).getName()); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone. */ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = 
readyArray.addObject(); // (continues getReindexing, started on the previous line)
                                       readyObject.setString("type", ready.getKey());
                                       setStatus(readyObject, ready.getValue());
                                   });
                      });
        return new SlimeJsonResponse(slime);
    }

    /** Copies the present fields of the given reindexing status into the given cursor. */
    void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
        status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
        status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
        status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
        status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
        status.message().ifPresent(message -> statusObject.setString("message", message));
        status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
        status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
        status.cause().ifPresent(cause -> statusObject.setString("cause", cause));
    }

    /** Serializes a reindexing state to its lower-case wire name. */
    private static String toString(ApplicationReindexing.State state) {
        return switch (state) {
            case PENDING: yield "pending";
            case RUNNING: yield "running";
            case FAILED: yield "failed";
            case SUCCESSFUL: yield "successful";
        };
    }

    /** Enables reindexing of an application in a zone. */
    private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment,
                                          String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().enableReindexing(id, zone);
        return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
    }

    /** Disables reindexing of an application in a zone.
*/ // (closes the javadoc opened at the end of the previous line)
    private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment,
                                           String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().disableReindexing(id, zone);
        return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment,
                                 String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        // The restart may be narrowed by the optional "hostname", "clusterType" and "clusterId" query parameters;
        // presumably an empty filter restarts all nodes of the deployment — verify against RestartFilter.
        RestartFilter restartFilter = new RestartFilter()
                .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
                .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
                .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
        controller.applications().restart(deploymentId, restartFilter);
        return new MessageResponse("Requested restart of " + deploymentId);
    }

    /** Set suspension status of the given deployment. */
    private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment,
                                 String region, boolean suspend) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        controller.applications().setSuspension(deploymentId, suspend);
        return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
    }

    private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
        if ( ! type.environment().isManuallyDeployed() && !
isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request)); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploySystemApplication(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || ! systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if ( ! vespaVersion.isEmpty()) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } DeploymentResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("message", "Deployed " + systemApplication.get() + " in " + zone + " on " + systemVersion.get().versionNumber()); Cursor logArray = root.setArray("prepareMessages"); for (LogEntry logMessage : result.log()) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.epochMillis()); logObject.setString("level", logMessage.level().getName()); logObject.setString("message", logMessage.message()); } return new SlimeJsonResponse(slime); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && ! 
isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); // (continues deleteTenant, started on the previous line)
        controller.tenants().delete(TenantName.from(tenantName),
                                    Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
                                                                                  toSlime(request.getData()).get(),
                                                                                  request.getJDiscRequest())),
                                    forget);
        return new MessageResponse("Deleted tenant " + tenantName);
    }

    /** Deletes the given application after resolving the caller's credentials. */
    private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
        return new MessageResponse("Deleted application " + id);
    }

    /** Deletes the given instance, and the enclosing application as well when this was its last instance. */
    private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        controller.applications().deleteInstance(id.instance(instanceName));
        if (controller.applications().requireApplication(id).instances().isEmpty()) {
            Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
            controller.applications().deleteApplication(id, credentials);
        }
        return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
    }

    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment,
                                    String region, HttpRequest request) {
        DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                           requireZone(environment, region));
        // Deactivate the deployment, then abort any deployment job run still in progress for it (continues on the next line)
        controller.applications().deactivate(id.applicationId(), id.zoneId());
        controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
                  .filter(run -> !
run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! 
type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime 
dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final 
Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); 
object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? 
application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void toSlime(IntRange range, Cursor object) { range.from().ifPresent(from -> object.setLong("from", from)); range.to().ifPresent(to -> object.setLong("to", to)); } private void toSlime(Cluster.Autoscaling autoscaling, Cursor autoscalingObject) { autoscalingObject.setString("status", autoscaling.status()); autoscalingObject.setString("description", autoscaling.description()); autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources"))); autoscalingObject.setLong("at", autoscaling.at().toEpochMilli()); toSlime(autoscaling.peak(), 
autoscalingObject.setObject("peak")); toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal")); } private void toSlime(Load load, Cursor loadObject) { loadObject.setDouble("cpu", load.cpu()); loadObject.setDouble("memory", load.memory()); loadObject.setDouble("disk", load.disk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant 
tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private 
URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 
1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.getOrDefault(EnvironmentResource.APPLICATION_TEST_ZIP, new byte[0]); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); TenantName tenantName = TenantName.from(tenant); controller.applications().verifyPlan(tenantName); controller.applications().verifyApplicationIdentityConfiguration(tenantName, Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], 
Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private void addAvailabilityZone(Cursor object, ZoneId zoneId) { ZoneApi zone = controller.zoneRegistry().get(zoneId); if (!zone.getCloudName().equals(CloudName.AWS)) return; object.setString("availabilityZone", zone.getCloudNativeAvailabilityZone()); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
Unfortunately, Java requires local variables referenced in lambdas to be effectively final :-(
/**
 * Runs one convergence pass for the node this agent manages: reconciles the locally
 * cached generation counters and node spec, then dispatches on the node's state to
 * bring the container and its services towards the wanted state.
 *
 * <p>NOTE(review): statement order within the {@code active} branch is significant —
 * the container may be removed and restarted mid-branch, and {@code containerState}
 * is deliberately set to STARTING before {@code startContainer} and back to UNKNOWN
 * after; do not reorder.
 *
 * @param context supplies the node spec, hostname and logging for this tick
 * @throws ConvergenceException on unknown node state, or (transiently) while the
 *         post-health-check warm-up period has not yet expired
 */
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);

    // Only move the cached reboot generation forward, never backward.
    if (currentRebootGeneration < node.currentRebootGeneration())
        currentRebootGeneration = node.currentRebootGeneration();

    // Adopt the node's restart generation when presence differs or the node's value is newer.
    if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() ||
        currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false))
        currentRestartGeneration = node.currentRestartGeneration();

    // Log a diff whenever the node spec changed since the previous tick.
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);
        lastNode = node;
    }

    switch (node.state()) {
        // Non-running states: sync logs, ensure no container, report attributes, stop services.
        case ready:
        case reserved:
        case failed:
        case inactive:
        case parked:
            storageMaintainer.syncLogs(context, true);
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context, Optional.empty());
            stopServicesIfNeeded(context);
            break;
        case active:
            storageMaintainer.syncLogs(context, true);
            storageMaintainer.cleanDiskIfFull(context);
            storageMaintainer.handleCoreDumpsForContainer(context, container, false);

            var runOrdinaryWireguardTasks = true;
            // If the container is already running, converge wireguard against it now and
            // skip the ordinary pass below. Copy to a local because lambdas may only
            // capture effectively final variables.
            if (container.isPresent() && container.get().state().isRunning()) {
                Optional<Container> finalContainer = container;
                wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id()));
                runOrdinaryWireguardTasks = false;
            }

            // Bail out of this tick while the wanted image is still downloading.
            if (downloadImageIfNeeded(context, container)) {
                context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString());
                return;
            }
            dropDocsIfNeeded(context, container);
            // May remove the container; the returned Optional reflects what remains.
            container = removeContainerIfNeededUpdateContainerState(context, container);
            credentialsMaintainers.forEach(maintainer -> maintainer.converge(context));
            if (container.isEmpty()) {
                // Mark STARTING so a crash between here and UNKNOWN is detectable next tick.
                containerState = STARTING;
                container = Optional.of(startContainer(context));
                containerState = UNKNOWN;
                // A freshly started container still needs the ordinary wireguard pass.
                runOrdinaryWireguardTasks = true;
            } else {
                container = Optional.of(updateContainerIfNeeded(context, container.get()));
            }

            aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));

            // Ordinary wireguard pass, unless it already ran against the running container above.
            if (runOrdinaryWireguardTasks) {
                Optional<Container> finalContainer = container;
                wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id()));
            }

            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            if (healthChecker.isPresent()) {
                healthChecker.get().verifyHealth(context);
                // Record the first instant health passed; warm-up is measured from it.
                if (firstSuccessfulHealthCheckInstant.isEmpty())
                    firstSuccessfulHealthCheckInstant = Optional.of(clock.instant());

                Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context)));
                // While the CPU allocation still differs from the wanted resources,
                // refuse to resume until the warm-up window has elapsed (transient failure
                // so the tick is retried).
                if (!container.get().resources().equalsCpu(getContainerResources(context)))
                    throw ConvergenceException.ofTransient("Refusing to resume until warm up period ends (" +
                            (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")");
            }

            serviceDumper.processServiceDumpRequest(context);

            updateNodeRepoWithCurrentAttributes(context, container.map(Container::createdAt));
            // Resume in Orchestrator if we believe we are suspended, or Orchestrator says so.
            if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) {
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                suspendedInOrchestrator = false;
            }
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready");
            credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.syncLogs(context, false);
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context, Optional.empty());
            nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
            break;
        default:
            throw ConvergenceException.ofError("UNKNOWN STATE " + node.state().name());
    }
}
Optional<Container> finalContainer = container;
/**
 * Runs one convergence pass for the node this agent manages: reconciles the locally
 * cached generation counters and node spec, then dispatches on the node's state to
 * bring the container and its services towards the wanted state.
 *
 * <p>NOTE(review): statement order within the {@code active} branch is significant —
 * the container may be removed and restarted mid-branch, and {@code containerState}
 * is deliberately set to STARTING before {@code startContainer} and back to UNKNOWN
 * after; do not reorder.
 *
 * @param context supplies the node spec, hostname and logging for this tick
 * @throws ConvergenceException on unknown node state, or (transiently) while the
 *         post-health-check warm-up period has not yet expired
 */
void doConverge(NodeAgentContext context) {
    NodeSpec node = context.node();
    Optional<Container> container = getContainer(context);

    // Only move the cached reboot generation forward, never backward.
    if (currentRebootGeneration < node.currentRebootGeneration())
        currentRebootGeneration = node.currentRebootGeneration();

    // Adopt the node's restart generation when presence differs or the node's value is newer.
    if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() ||
        currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false))
        currentRestartGeneration = node.currentRestartGeneration();

    // Log a diff whenever the node spec changed since the previous tick.
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(context, lastNode, node);
        lastNode = node;
    }

    switch (node.state()) {
        // Non-running states: sync logs, ensure no container, report attributes, stop services.
        case ready:
        case reserved:
        case failed:
        case inactive:
        case parked:
            storageMaintainer.syncLogs(context, true);
            removeContainerIfNeededUpdateContainerState(context, container);
            updateNodeRepoWithCurrentAttributes(context, Optional.empty());
            stopServicesIfNeeded(context);
            break;
        case active:
            storageMaintainer.syncLogs(context, true);
            storageMaintainer.cleanDiskIfFull(context);
            storageMaintainer.handleCoreDumpsForContainer(context, container, false);

            var runOrdinaryWireguardTasks = true;
            // If the container is already running, converge wireguard against it now and
            // skip the ordinary pass below. Copy to a local because lambdas may only
            // capture effectively final variables.
            if (container.isPresent() && container.get().state().isRunning()) {
                Optional<Container> finalContainer = container;
                wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id()));
                runOrdinaryWireguardTasks = false;
            }

            // Bail out of this tick while the wanted image is still downloading.
            if (downloadImageIfNeeded(context, container)) {
                context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString());
                return;
            }
            dropDocsIfNeeded(context, container);
            // May remove the container; the returned Optional reflects what remains.
            container = removeContainerIfNeededUpdateContainerState(context, container);
            credentialsMaintainers.forEach(maintainer -> maintainer.converge(context));
            if (container.isEmpty()) {
                // Mark STARTING so a crash between here and UNKNOWN is detectable next tick.
                containerState = STARTING;
                container = Optional.of(startContainer(context));
                containerState = UNKNOWN;
                // A freshly started container still needs the ordinary wireguard pass.
                runOrdinaryWireguardTasks = true;
            } else {
                container = Optional.of(updateContainerIfNeeded(context, container.get()));
            }

            aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));

            // Ordinary wireguard pass, unless it already ran against the running container above.
            if (runOrdinaryWireguardTasks) {
                Optional<Container> finalContainer = container;
                wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id()));
            }

            startServicesIfNeeded(context);
            resumeNodeIfNeeded(context);
            if (healthChecker.isPresent()) {
                healthChecker.get().verifyHealth(context);
                // Record the first instant health passed; warm-up is measured from it.
                if (firstSuccessfulHealthCheckInstant.isEmpty())
                    firstSuccessfulHealthCheckInstant = Optional.of(clock.instant());

                Duration timeLeft = Duration.between(clock.instant(), firstSuccessfulHealthCheckInstant.get().plus(warmUpDuration(context)));
                // While the CPU allocation still differs from the wanted resources,
                // refuse to resume until the warm-up window has elapsed (transient failure
                // so the tick is retried).
                if (!container.get().resources().equalsCpu(getContainerResources(context)))
                    throw ConvergenceException.ofTransient("Refusing to resume until warm up period ends (" +
                            (timeLeft.isNegative() ? "next tick" : "in " + timeLeft) + ")");
            }

            serviceDumper.processServiceDumpRequest(context);

            updateNodeRepoWithCurrentAttributes(context, container.map(Container::createdAt));
            // Resume in Orchestrator if we believe we are suspended, or Orchestrator says so.
            if (suspendedInOrchestrator || node.orchestratorStatus().isSuspended()) {
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                suspendedInOrchestrator = false;
            }
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(context, container);
            context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready");
            credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.syncLogs(context, false);
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(context, Optional.empty());
            nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
            break;
        default:
            throw ConvergenceException.ofError("UNKNOWN STATE " + node.state().name());
    }
}
/**
 * Agent responsible for converging a single node toward its wanted state: it runs a dedicated
 * tick thread (see {@link #start}) that repeatedly pulls a fresh context from the supplier and
 * calls {@code converge}, managing the node's container lifecycle, service start/stop/resume,
 * credentials, ACLs, wireguard tasks, and node-repo attribute publishing.
 *
 * <p>Mutable state (containerState, generations, flags) is only touched from the tick thread,
 * except {@code terminated} which is an AtomicBoolean set by {@link #stopForRemoval} — presumably
 * the intended thread-safety model; confirm against callers before relying on it.
 */
class NodeAgentImpl implements NodeAgent {

    // Default warm-up before CPU limits are (re)applied after a container start; slightly under
    // 90 s (see warmUpDuration(context), which divides this by 3 in CD systems).
    private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1));

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // Collaborators, all injected via the constructor.
    private final NodeAgentContextSupplier contextSupplier;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final ContainerOperations containerOperations;
    private final RegistryCredentialsProvider registryCredentialsProvider;
    private final StorageMaintainer storageMaintainer;
    private final List<CredentialsMaintainer> credentialsMaintainers;
    private final Optional<AclMaintainer> aclMaintainer;
    private final Optional<HealthChecker> healthChecker;
    private final Clock clock;
    private final Duration warmUpDuration;
    private final DoubleFlag containerCpuCap;
    private final VespaServiceDumper serviceDumper;
    private final List<ContainerWireguardTask> wireguardTasks;

    // The tick thread created by start(); null until then.
    private Thread loopThread;

    private ContainerState containerState = UNKNOWN;
    // Last node spec seen, used to log diffs between ticks.
    private NodeSpec lastNode;
    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Whether vespa-nodectl resume has been invoked for the current service incarnation.
    private boolean hasResumedNode = false;
    // Starts true so services are not "started" for a node we have not yet stopped them on.
    private boolean hasStartedServices = true;
    // Set on first successful health check; cleared on restart/stop; drives warm-up gating.
    private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty();
    // True while we hold an Orchestrator suspension that we must resume.
    private boolean suspendedInOrchestrator = false;

    private int numberOfUnhandledException = 0;
    // Reboot/restart generations this agent has acted on, compared against the node-repo's wanted values.
    private long currentRebootGeneration = 0;
    private Optional<Long> currentRestartGeneration = Optional.empty();

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    /** Convenience constructor using {@link #DEFAULT_WARM_UP_DURATION}. */
    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider,
                         StorageMaintainer storageMaintainer, FlagSource flagSource,
                         List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker,
                         Clock clock, VespaServiceDumper serviceDumper,
                         List<ContainerWireguardTask> wireguardTasks) {
        this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider,
             storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock,
             DEFAULT_WARM_UP_DURATION, serviceDumper, wireguardTasks);
    }

    /** Full constructor; {@code warmUpDuration} is exposed mainly so tests can shrink it. */
    public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository,
                         Orchestrator orchestrator, ContainerOperations containerOperations,
                         RegistryCredentialsProvider registryCredentialsProvider,
                         StorageMaintainer storageMaintainer, FlagSource flagSource,
                         List<CredentialsMaintainer> credentialsMaintainers,
                         Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker,
                         Clock clock, Duration warmUpDuration, VespaServiceDumper serviceDumper,
                         List<ContainerWireguardTask> wireguardTasks) {
        this.contextSupplier = contextSupplier;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.containerOperations = containerOperations;
        this.registryCredentialsProvider = registryCredentialsProvider;
        this.storageMaintainer = storageMaintainer;
        this.credentialsMaintainers = credentialsMaintainers;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        this.clock = clock;
        this.warmUpDuration = warmUpDuration;
        this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource);
        this.serviceDumper = serviceDumper;
        // Defensive copy: the supplied list must not be mutated behind our back.
        this.wireguardTasks = new ArrayList<>(wireguardTasks);
    }

    /**
     * Starts the tick loop on a dedicated thread named "tick-&lt;hostname&gt;". May only be
     * called once per agent instance.
     */
    @Override
    public void start(NodeAgentContext initialContext) {
        if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent.");

        loopThread = new Thread(() -> {
            while (!terminated.get()) {
                try {
                    converge(contextSupplier.nextContext());
                } catch (ContextSupplierInterruptedException ignored) { } // interruption is the shutdown signal
            }
        });
        loopThread.setName("tick-" + initialContext.hostname());
        loopThread.start();
    }

    /**
     * Signals termination, interrupts the context supplier, and blocks until the tick thread
     * has exited. May only be called once.
     */
    @Override
    public void stopForRemoval(NodeAgentContext context) {
        if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent.");

        contextSupplier.interrupt();

        do {
            try {
                loopThread.join();
            } catch (InterruptedException ignored) { } // keep joining until the loop thread is gone
        } while (loopThread.isAlive());

        context.log(logger, "Stopped");
    }

    /** Invokes vespa-nodectl start unless services are already marked started. */
    void startServicesIfNeeded(NodeAgentContext context) {
        if (!hasStartedServices) {
            context.log(logger, "Invoking vespa-nodectl to start services");
            String output = containerOperations.startServices(context);
            if (!output.isBlank()) {
                context.log(logger, "Start services output: " + output);
            }
            hasStartedServices = true;
        }
    }

    /** Invokes vespa-nodectl resume unless the node is already marked resumed. */
    void resumeNodeIfNeeded(NodeAgentContext context) {
        if (!hasResumedNode) {
            context.log(logger, "Invoking vespa-nodectl to resume services");
            String output = containerOperations.resumeNode(context);
            if (!output.isBlank()) {
                context.log(logger, "Resume services output: " + output);
            }
            hasResumedNode = true;
        }
    }

    /**
     * Publishes restart/reboot generations, docker image, vespa version and drop-documents
     * report to the node-repo — but only when any of them differ from the node-repo's view.
     *
     * @param containerCreatedAt creation time of the running container, if any; used to decide
     *                           whether the container was (re)created after the last "rebooted" event
     */
    private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context, Optional<Instant> containerCreatedAt) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();
        boolean changed = false;

        if (context.node().wantedRestartGeneration().isPresent() &&
                !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) {
            currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration());
            newNodeAttributes.withRestartGeneration(currentRestartGeneration);
            changed = true;
        }

        // True if the container was created after the most recent "rebooted" event; when no such
        // event exists, a present container counts as "after".
        boolean createdAtAfterRebootedEvent = context.node().events().stream()
                .filter(event -> event.type().equals("rebooted"))
                .map(event -> containerCreatedAt
                        .map(createdAt -> createdAt.isAfter(event.at()))
                        .orElse(false))
                .findFirst()
                .orElse(containerCreatedAt.isPresent());
        if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration) || createdAtAfterRebootedEvent) {
            currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration());
            newNodeAttributes.withRebootGeneration(currentRebootGeneration);
            changed = true;
        }

        // Only report the wanted image as current once containerState is UNKNOWN (i.e. not mid-start).
        Optional<DockerImage> wantedDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(context.node().currentDockerImage(), wantedDockerImage)) {
            DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY);
            DockerImage newImage = wantedDockerImage.orElse(DockerImage.EMPTY);

            currentNodeAttributes.withDockerImage(currentImage);
            currentNodeAttributes.withVespaVersion(context.node().currentVespaVersion().orElse(Version.emptyVersion));
            newNodeAttributes.withDockerImage(newImage);
            newNodeAttributes.withVespaVersion(context.node().wantedVespaVersion().orElse(Version.emptyVersion));
            changed = true;
        }

        // Stamp startedAt on a readied drop-documents report that has not yet been started.
        Optional<DropDocumentsReport> report = context.node().reports().getReport(DropDocumentsReport.reportId(), DropDocumentsReport.class);
        if (report.isPresent() && report.get().startedAt() == null && report.get().readiedAt() != null) {
            newNodeAttributes.withReport(DropDocumentsReport.reportId(), report.get().withStartedAt(clock.millis()).toJsonNode());
            changed = true;
        }

        if (changed) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                        currentNodeAttributes, newNodeAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newNodeAttributes);
        }
    }

    /**
     * Creates and starts the container; CPU is left unlimited during the warm-up period.
     * Adopts the wanted reboot/restart generations as acted upon.
     *
     * @throws ConvergenceException (error) if the container cannot be found right after starting
     */
    private Container startContainer(NodeAgentContext context) {
        ContainerResources wantedResources = warmUpDuration(context).isNegative() ?
                getContainerResources(context) : getContainerResources(context).withUnlimitedCpus();
        ContainerData containerData = containerOperations.createContainer(context, wantedResources);
        writeContainerData(context, containerData);
        containerOperations.startContainer(context);

        currentRebootGeneration = context.node().wantedRebootGeneration();
        currentRestartGeneration = context.node().wantedRestartGeneration();
        hasStartedServices = true; // containerOperations.startContainer starts services
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
        return containerOperations.getContainer(context).orElseThrow(() ->
                ConvergenceException.ofError("Did not find container that was just started"));
    }

    /**
     * Removes the container when {@link #shouldRemoveContainer} finds a reason (returning empty),
     * otherwise restarts services in-place if the restart generation has been bumped.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeAgentContext context, Optional<Container> existingContainer) {
        if (existingContainer.isPresent()) {
            List<String> reasons = shouldRemoveContainer(context, existingContainer.get());
            if (!reasons.isEmpty()) {
                removeContainer(context, existingContainer.get(), reasons, false);
                return Optional.empty();
            }

            shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> {
                context.log(logger, "Invoking vespa-nodectl to restart services: " + restartReason);
                orchestratorSuspendNode(context);

                // Lift the CPU cap for the warm-up that follows the restart.
                ContainerResources currentResources = existingContainer.get().resources();
                ContainerResources wantedResources = currentResources.withUnlimitedCpus();
                if ( ! warmUpDuration(context).isNegative() && ! wantedResources.equals(currentResources)) {
                    context.log(logger, "Updating container resources: %s -> %s",
                                existingContainer.get().resources().toStringCpu(), wantedResources.toStringCpu());
                    containerOperations.updateContainer(context, existingContainer.get().id(), wantedResources);
                }

                String output = containerOperations.restartVespa(context);
                if ( ! output.isBlank()) {
                    context.log(logger, "Restart services output: " + output);
                }
                currentRestartGeneration = context.node().wantedRestartGeneration();
                firstSuccessfulHealthCheckInstant = Optional.empty(); // restart begins a new warm-up
            });
        }
        return existingContainer;
    }

    /**
     * Returns a human-readable restart reason when the wanted restart generation is ahead of the
     * one we have acted on; empty otherwise. Only applies to a running container on an active node.
     */
    private Optional<String> shouldRestartServices(NodeAgentContext context, Container existingContainer) {
        NodeSpec node = context.node();
        if (!existingContainer.state().isRunning() || node.state() != NodeState.active) return Optional.empty();

        // NOTE(review): unchecked Optional.get() on both generations — doConverge syncs
        // currentRestartGeneration's presence with the node spec first, which presumably makes
        // this safe; confirm before calling from any other path.
        if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Stops services when they are running but the node no longer has an owner. */
    private void stopServicesIfNeeded(NodeAgentContext context) {
        if (hasStartedServices && context.node().owner().isEmpty())
            stopServices(context);
    }

    /** Stops services in the container (no-op if the container is known to be absent). */
    private void stopServices(NodeAgentContext context) {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        hasStartedServices = hasResumedNode = false;
        firstSuccessfulHealthCheckInstant = Optional.empty();
        containerOperations.stopServices(context);
    }

    /** Removes the container as part of host suspension; the host is already suspended. */
    @Override
    public void stopForHostSuspension(NodeAgentContext context) {
        getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true));
    }

    /**
     * Invokes vespa-nodectl suspend; failures are logged and swallowed deliberately
     * (best-effort — removal/stop proceeds regardless).
     */
    public void suspend(NodeAgentContext context) {
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;

            context.log(logger, "Invoking vespa-nodectl to suspend services");
            String output = containerOperations.suspendNode(context);
            if (!output.isBlank()) {
                context.log(logger, "Suspend services output: " + output);
            }
        } catch (RuntimeException e) {
            // It's bad to continue as-if nothing happened, but on the other hand if we do not proceed to
            // remove container, we will not be able to upgrade to fix any problems in the suspend logic!
            context.log(logger, Level.WARNING, "Failed trying to suspend container", e);
        }
    }

    /**
     * Collects every reason the existing container must be removed (state change, image change,
     * death, reboot wanted, memory change, failed start). Empty list means keep it.
     */
    private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) {
        final NodeState nodeState = context.node().state();
        List<String> reasons = new ArrayList<>();
        if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned)
            reasons.add("Node in state " + nodeState + ", container should no longer be running");

        if (context.node().wantedDockerImage().isPresent() &&
                !context.node().wantedDockerImage().get().equals(existingContainer.image())) {
            reasons.add("The node is supposed to run a new Docker image: "
                    + existingContainer.image().asString() + " -> " + context.node().wantedDockerImage().get().asString());
        }

        if (!existingContainer.state().isRunning()) reasons.add("Container no longer running");

        if (currentRebootGeneration < context.node().wantedRebootGeneration()) {
            reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    currentRebootGeneration, context.node().wantedRebootGeneration()));
        }

        // Memory changes require recreation; CPU-only changes are handled by updateContainerIfNeeded.
        ContainerResources wantedContainerResources = getContainerResources(context);
        if (!wantedContainerResources.equalsMemory(existingContainer.resources())) {
            reasons.add("Container should be running with different memory allocation, wanted: " +
                    wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources().toStringMemory());
        }

        if (containerState == STARTING) reasons.add("Container failed to start");
        return reasons;
    }

    /**
     * Suspends (unless already suspended), stops services, harvests core dumps, and removes the
     * container, finally marking containerState ABSENT.
     */
    private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) {
        context.log(logger, "Will remove container: " + String.join(", ", reasons));

        if (existingContainer.state().isRunning()) {
            if (!alreadySuspended) {
                orchestratorSuspendNode(context);
            }

            try {
                if (context.node().state() == NodeState.active) {
                    suspend(context);
                }
                stopServices(context);
            } catch (Exception e) {
                // Best-effort: removal must proceed even if graceful stop fails.
                context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e);
            }
        }

        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer), true);
        containerOperations.removeContainer(context, existingContainer);
        containerState = ABSENT;

        context.log(logger, "Container successfully removed, new containerState is " + containerState);
    }

    /**
     * Applies a CPU-only resource update to the running container. Skipped while still in the
     * warm-up window (when a health checker is configured). Memory is preserved as-is.
     *
     * @throws ConvergenceException (error) if the container cannot be found right after updating
     */
    private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
        ContainerResources wantedContainerResources = getContainerResources(context);

        // Still warming up (no successful health check yet, or warm-up window not elapsed): leave CPU alone.
        if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant
                .map(clock.instant().minus(warmUpDuration(context))::isBefore)
                .orElse(true))
            return existingContainer;

        if (wantedContainerResources.equalsCpu(existingContainer.resources())) return existingContainer;
        context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
                    wantedContainerResources.toStringCpu(), existingContainer.resources().toStringCpu());

        containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources().memoryBytes()));
        return containerOperations.getContainer(context).orElseThrow(() ->
                ConvergenceException.ofError("Did not find container that was just updated"));
    }

    /**
     * Computes the wanted container resources: CPU cap is 0 (uncapped) in dev, otherwise the
     * host's vcpus scaled by the CONTAINER_CPU_CAP flag, resolved with this node's dimensions.
     */
    private ContainerResources getContainerResources(NodeAgentContext context) {
        double cpuCap = noCpuCap(context.zone()) ?
                0 :
                context.vcpuOnThisHost() * containerCpuCap
                        .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm))
                        .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().map(NodeMembership::clusterId))
                        .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().map(membership -> membership.type().value()))
                        .with(FetchVector.Dimension.HOSTNAME, context.node().hostname())
                        .value();

        return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb());
    }

    /** CPU capping is disabled entirely in the dev environment. */
    private boolean noCpuCap(ZoneApi zone) {
        return zone.getEnvironment() == Environment.dev;
    }

    /**
     * Kicks off an asynchronous image pull when the wanted image differs from the running one.
     *
     * @return true if a pull is in progress (caller should wait), false if the image is ready
     */
    private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) {
        NodeSpec node = context.node();
        if (node.wantedDockerImage().equals(container.map(c -> c.image()))) return false;

        RegistryCredentials credentials = registryCredentialsProvider.get();
        return node.wantedDockerImage()
                .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials))
                .orElse(false);
    }

    /**
     * Handles a pending drop-documents report: removes the container, deletes the search index
     * under var/db/vespa/search, stamps droppedAt, and then always throws a transient
     * ConvergenceException until the report is marked readied by the control plane.
     */
    private void dropDocsIfNeeded(NodeAgentContext context, Optional<Container> container) {
        Optional<DropDocumentsReport> report = context.node().reports()
                .getReport(DropDocumentsReport.reportId(), DropDocumentsReport.class);
        if (report.isEmpty() || report.get().readiedAt() != null) return;

        if (report.get().droppedAt() == null) {
            container.ifPresent(c -> removeContainer(context, c, List.of("Dropping documents"), true));
            FileFinder.from(context.paths().underVespaHome("var/db/vespa/search")).deleteRecursively(context);
            nodeRepository.updateNodeAttributes(context.node().hostname(), new NodeAttributes().withReport(DropDocumentsReport.reportId(), report.get().withDroppedAt(clock.millis()).toJsonNode()));
        }
        throw ConvergenceException.ofTransient("Documents already dropped, waiting for signal to start the container");
    }

    /**
     * One tick: runs doConverge and translates its outcome. Transient ConvergenceExceptions are
     * only logged; errors and all other Throwables increment the unhandled-exception counter.
     */
    public void converge(NodeAgentContext context) {
        try {
            doConverge(context);
            context.log(logger, Level.INFO, "Converged");
        } catch (ConvergenceException e) {
            context.log(logger, e.getMessage());
            if (e.isError()) numberOfUnhandledException++;
        } catch (Throwable e) {
            numberOfUnhandledException++;
            context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e);
        }
    }

    /** Logs which tracked fields (currently only state) changed between two node specs. */
    private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
        StringBuilder builder = new StringBuilder();
        appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state);
        if (builder.length() > 0) {
            context.log(logger, Level.INFO, "Changes to node: " + builder);
        }
    }

    /** Renders a value for diff logging; null becomes "[absent]". */
    private static <T> String fieldDescription(T value) {
        return value == null ? "[absent]" : value.toString();
    }

    /** Appends "name old -> new" to the builder when the extracted field differs between specs. */
    private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
        T oldValue = oldNode == null ? null : getter.apply(oldNode);
        T newValue = getter.apply(newNode);
        if (!Objects.equals(oldValue, newValue)) {
            if (builder.length() > 0) {
                builder.append(", ");
            }
            builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
        }
    }

    /**
     * Asks the container runtime for the container's current state (per the ContainerState doc,
     * a running container may die at any time, so we must always re-query). Short-circuits and
     * caches only the ABSENT state, which cannot change without our involvement.
     */
    private Optional<Container> getContainer(NodeAgentContext context) {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = containerOperations.getContainer(context);
        if (container.isEmpty()) containerState = ABSENT;
        return container;
    }

    /** Returns the count of unhandled exceptions since the last call, resetting it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Asks the Orchestrator for permission to suspend this node. On denial, re-converges ACLs
     * (any ACL failure is attached as suppressed) and rethrows so the tick aborts.
     */
    private void orchestratorSuspendNode(NodeAgentContext context) {
        if (context.node().state() != NodeState.active) return;

        context.log(logger, "Ask Orchestrator for permission to suspend node");
        try {
            orchestrator.suspend(context.hostname().value());
            suspendedInOrchestrator = true;
        } catch (OrchestratorException e) {
            // Ensure the ACLs are up to date: a suspend failure may be caused by stale ACLs
            // blocking the health checks — converge them before giving up this tick.
            try {
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            } catch (RuntimeException suppressed) {
                logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed);
                e.addSuppressed(suppressed);
            }
            throw e;
        }
    }

    /** Hook for subclasses to persist data produced when the container was created. No-op here. */
    protected void writeContainerData(NodeAgentContext context, ContainerData containerData) { }

    /** Exposes the credentials maintainers to subclasses. */
    protected List<CredentialsMaintainer> credentialsMaintainers() {
        return credentialsMaintainers;
    }

    /**
     * Effective warm-up duration for this node: negative (disabled) in test environments, for
     * non-tenant nodes, and for memberships that are neither container nor admin; otherwise the
     * configured duration, divided by 3 in CD systems.
     */
    private Duration warmUpDuration(NodeAgentContext context) {
        ZoneApi zone = context.zone();
        Optional<NodeMembership> membership = context.node().membership();
        return zone.getEnvironment().isTest()
                || context.nodeType() != NodeType.tenant
                || membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false)
                ? Duration.ofSeconds(-1)
                : warmUpDuration.dividedBy(zone.getSystemName().isCd() ? 3 : 1);
    }

}
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final ContainerOperations containerOperations; private final RegistryCredentialsProvider registryCredentialsProvider; private final StorageMaintainer storageMaintainer; private final List<CredentialsMaintainer> credentialsMaintainers; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final Clock clock; private final Duration warmUpDuration; private final DoubleFlag containerCpuCap; private final VespaServiceDumper serviceDumper; private final List<ContainerWireguardTask> wireguardTasks; private Thread loopThread; private ContainerState containerState = UNKNOWN; private NodeSpec lastNode; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private Optional<Instant> firstSuccessfulHealthCheckInstant = Optional.empty(); private boolean suspendedInOrchestrator = false; private int numberOfUnhandledException = 0; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, VespaServiceDumper serviceDumper, List<ContainerWireguardTask> wireguardTasks) { this(contextSupplier, nodeRepository, orchestrator, containerOperations, registryCredentialsProvider, storageMaintainer, flagSource, credentialsMaintainers, aclMaintainer, healthChecker, clock, DEFAULT_WARM_UP_DURATION, serviceDumper, wireguardTasks); } public NodeAgentImpl(NodeAgentContextSupplier contextSupplier, NodeRepository nodeRepository, Orchestrator orchestrator, ContainerOperations containerOperations, RegistryCredentialsProvider registryCredentialsProvider, StorageMaintainer storageMaintainer, FlagSource flagSource, List<CredentialsMaintainer> credentialsMaintainers, Optional<AclMaintainer> aclMaintainer, Optional<HealthChecker> healthChecker, Clock clock, Duration warmUpDuration, VespaServiceDumper serviceDumper, List<ContainerWireguardTask> wireguardTasks) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.containerOperations = containerOperations; this.registryCredentialsProvider = registryCredentialsProvider; this.storageMaintainer = storageMaintainer; this.credentialsMaintainers = credentialsMaintainers; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.clock = clock; this.warmUpDuration = warmUpDuration; this.containerCpuCap = PermanentFlags.CONTAINER_CPU_CAP.bindTo(flagSource); this.serviceDumper = serviceDumper; this.wireguardTasks = new ArrayList<>(wireguardTasks); } @Override public void 
start(NodeAgentContext initialContext) { if (loopThread != null) throw new IllegalStateException("Can not re-start a node agent."); loopThread = new Thread(() -> { while (!terminated.get()) { try { converge(contextSupplier.nextContext()); } catch (ContextSupplierInterruptedException ignored) { } } }); loopThread.setName("tick-" + initialContext.hostname()); loopThread.start(); } @Override public void stopForRemoval(NodeAgentContext context) { if (!terminated.compareAndSet(false, true)) throw new IllegalStateException("Can not re-stop a node agent."); contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); context.log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Invoking vespa-nodectl to start services"); String output = containerOperations.startServices(context); if (!output.isBlank()) { context.log(logger, "Start services output: " + output); } hasStartedServices = true; } } void resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, "Invoking vespa-nodectl to resume services"); String output = containerOperations.resumeNode(context); if (!output.isBlank()) { context.log(logger, "Resume services output: " + output); } hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context, Optional<Instant> containerCreatedAt) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); boolean changed = false; if (context.node().wantedRestartGeneration().isPresent() && !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); changed = true; } boolean createdAtAfterRebootedEvent = 
context.node().events().stream() .filter(event -> event.type().equals("rebooted")) .map(event -> containerCreatedAt .map(createdAt -> createdAt.isAfter(event.at())) .orElse(false)) .findFirst() .orElse(containerCreatedAt.isPresent()); if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration) || createdAtAfterRebootedEvent) { currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); changed = true; } Optional<DockerImage> wantedDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().currentDockerImage(), wantedDockerImage)) { DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = wantedDockerImage.orElse(DockerImage.EMPTY); currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(context.node().currentVespaVersion().orElse(Version.emptyVersion)); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(context.node().wantedVespaVersion().orElse(Version.emptyVersion)); changed = true; } Optional<DropDocumentsReport> report = context.node().reports().getReport(DropDocumentsReport.reportId(), DropDocumentsReport.class); if (report.isPresent() && report.get().startedAt() == null && report.get().readiedAt() != null) { newNodeAttributes.withReport(DropDocumentsReport.reportId(), report.get().withStartedAt(clock.millis()).toJsonNode()); changed = true; } if (changed) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentNodeAttributes, newNodeAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newNodeAttributes); } } private Container startContainer(NodeAgentContext context) { ContainerResources wantedResources = warmUpDuration(context).isNegative() ? 
getContainerResources(context) : getContainerResources(context).withUnlimitedCpus(); ContainerData containerData = containerOperations.createContainer(context, wantedResources); writeContainerData(context, containerData); containerOperations.startContainer(context); currentRebootGeneration = context.node().wantedRebootGeneration(); currentRestartGeneration = context.node().wantedRestartGeneration(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); return containerOperations.getContainer(context).orElseThrow(() -> ConvergenceException.ofError("Did not find container that was just started")); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { List<String> reasons = shouldRemoveContainer(context, existingContainer.get()); if (!reasons.isEmpty()) { removeContainer(context, existingContainer.get(), reasons, false); return Optional.empty(); } shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> { context.log(logger, "Invoking vespa-nodectl to restart services: " + restartReason); orchestratorSuspendNode(context); ContainerResources currentResources = existingContainer.get().resources(); ContainerResources wantedResources = currentResources.withUnlimitedCpus(); if ( ! warmUpDuration(context).isNegative() && ! wantedResources.equals(currentResources)) { context.log(logger, "Updating container resources: %s -> %s", existingContainer.get().resources().toStringCpu(), wantedResources.toStringCpu()); containerOperations.updateContainer(context, existingContainer.get().id(), wantedResources); } String output = containerOperations.restartVespa(context); if ( ! 
output.isBlank()) { context.log(logger, "Restart services output: " + output); } currentRestartGeneration = context.node().wantedRestartGeneration(); firstSuccessfulHealthCheckInstant = Optional.empty(); }); } return existingContainer; } private Optional<String> shouldRestartServices(NodeAgentContext context, Container existingContainer) { NodeSpec node = context.node(); if (!existingContainer.state().isRunning() || node.state() != NodeState.active) return Optional.empty(); if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get()); } return Optional.empty(); } private void stopServicesIfNeeded(NodeAgentContext context) { if (hasStartedServices && context.node().owner().isEmpty()) stopServices(context); } private void stopServices(NodeAgentContext context) { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; hasStartedServices = hasResumedNode = false; firstSuccessfulHealthCheckInstant = Optional.empty(); containerOperations.stopServices(context); } @Override public void stopForHostSuspension(NodeAgentContext context) { getContainer(context).ifPresent(container -> removeContainer(context, container, List.of("Suspending host"), true)); } public void suspend(NodeAgentContext context) { if (containerState == ABSENT) return; try { hasResumedNode = false; context.log(logger, "Invoking vespa-nodectl to suspend services"); String output = containerOperations.suspendNode(context); if (!output.isBlank()) { context.log(logger, "Suspend services output: " + output); } } catch (RuntimeException e) { context.log(logger, Level.WARNING, "Failed trying to suspend container", e); } } private List<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().state(); List<String> reasons = new 
ArrayList<>(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) reasons.add("Node in state " + nodeState + ", container should no longer be running"); if (context.node().wantedDockerImage().isPresent() && !context.node().wantedDockerImage().get().equals(existingContainer.image())) { reasons.add("The node is supposed to run a new Docker image: " + existingContainer.image().asString() + " -> " + context.node().wantedDockerImage().get().asString()); } if (!existingContainer.state().isRunning()) reasons.add("Container no longer running"); if (currentRebootGeneration < context.node().wantedRebootGeneration()) { reasons.add(String.format("Container reboot wanted. Current: %d, Wanted: %d", currentRebootGeneration, context.node().wantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources())) { reasons.add("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources().toStringMemory()); } if (containerState == STARTING) reasons.add("Container failed to start"); return reasons; } private void removeContainer(NodeAgentContext context, Container existingContainer, List<String> reasons, boolean alreadySuspended) { context.log(logger, "Will remove container: " + String.join(", ", reasons)); if (existingContainer.state().isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().state() == NodeState.active) { suspend(context); } stopServices(context); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer), true); containerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new 
containerState is " + containerState); } private Container updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (healthChecker.isPresent() && firstSuccessfulHealthCheckInstant .map(clock.instant().minus(warmUpDuration(context))::isBefore) .orElse(true)) return existingContainer; if (wantedContainerResources.equalsCpu(existingContainer.resources())) return existingContainer; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources().toStringCpu()); containerOperations.updateContainer(context, existingContainer.id(), wantedContainerResources.withMemoryBytes(existingContainer.resources().memoryBytes())); return containerOperations.getContainer(context).orElseThrow(() -> ConvergenceException.ofError("Did not find container that was just updated")); } private ContainerResources getContainerResources(NodeAgentContext context) { double cpuCap = noCpuCap(context.zone()) ? 
0 : context.vcpuOnThisHost() * containerCpuCap .with(FetchVector.Dimension.APPLICATION_ID, context.node().owner().map(ApplicationId::serializedForm)) .with(FetchVector.Dimension.CLUSTER_ID, context.node().membership().map(NodeMembership::clusterId)) .with(FetchVector.Dimension.CLUSTER_TYPE, context.node().membership().map(membership -> membership.type().value())) .with(FetchVector.Dimension.HOSTNAME, context.node().hostname()) .value(); return ContainerResources.from(cpuCap, context.vcpuOnThisHost(), context.node().memoryGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev; } private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) { NodeSpec node = context.node(); if (node.wantedDockerImage().equals(container.map(c -> c.image()))) return false; RegistryCredentials credentials = registryCredentialsProvider.get(); return node.wantedDockerImage() .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials)) .orElse(false); } private void dropDocsIfNeeded(NodeAgentContext context, Optional<Container> container) { Optional<DropDocumentsReport> report = context.node().reports() .getReport(DropDocumentsReport.reportId(), DropDocumentsReport.class); if (report.isEmpty() || report.get().readiedAt() != null) return; if (report.get().droppedAt() == null) { container.ifPresent(c -> removeContainer(context, c, List.of("Dropping documents"), true)); FileFinder.from(context.paths().underVespaHome("var/db/vespa/search")).deleteRecursively(context); nodeRepository.updateNodeAttributes(context.node().hostname(), new NodeAttributes().withReport(DropDocumentsReport.reportId(), report.get().withDroppedAt(clock.millis()).toJsonNode())); } throw ConvergenceException.ofTransient("Documents already dropped, waiting for signal to start the container"); } public void converge(NodeAgentContext context) { try { doConverge(context); context.log(logger, Level.INFO, "Converged"); } catch 
(ConvergenceException e) { context.log(logger, e.getMessage()); if (e.isError()) numberOfUnhandledException++; } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, Level.SEVERE, "Unhandled exception, ignoring", e); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state); if (builder.length() > 0) { context.log(logger, Level.INFO, "Changes to node: " + builder); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = containerOperations.getContainer(context); if (container.isEmpty()) containerState = ABSENT; return container; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().state() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); suspendedInOrchestrator = true; } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { 
logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected void writeContainerData(NodeAgentContext context, ContainerData containerData) { } protected List<CredentialsMaintainer> credentialsMaintainers() { return credentialsMaintainers; } private Duration warmUpDuration(NodeAgentContext context) { ZoneApi zone = context.zone(); Optional<NodeMembership> membership = context.node().membership(); return zone.getEnvironment().isTest() || context.nodeType() != NodeType.tenant || membership.map(mem -> ! (mem.type().hasContainer() || mem.type().isAdmin())).orElse(false) ? Duration.ofSeconds(-1) : warmUpDuration.dividedBy(zone.getSystemName().isCd() ? 3 : 1); } }
FYI `HttpRequest.getConnectedAt` returns the timestamp when the TLS/HTTP connection was initiated, not when the request was received. `HttpRequest.relativeCreatedAtNanoTime()` returns the time the request was handed over from Jetty.
private static long doomMillis(HttpRequest request) { long connectedAtMillis = request.getConnectedAt(MILLISECONDS); long requestTimeoutMillis = getProperty(request, TIMEOUT, timeoutMillisParser).orElse(defaultTimeout.toMillis()); return connectedAtMillis + requestTimeoutMillis; }
long connectedAtMillis = request.getConnectedAt(MILLISECONDS);
private static long doomMillis(HttpRequest request) { long createdAtMillis = request.creationTime(MILLISECONDS); long requestTimeoutMillis = getProperty(request, TIMEOUT, timeoutMillisParser).orElse(defaultTimeout.toMillis()); return createdAtMillis + requestTimeoutMillis; }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
Hmm, how's that for HTTP/2 with multiple requests per connection? I guess we could use the JDisc creationTime, but in one of my dumps, the difference between the two was 13s, and in the other it was 2s and 0.14s, all of which I think are a bit high.
private static long doomMillis(HttpRequest request) { long connectedAtMillis = request.getConnectedAt(MILLISECONDS); long requestTimeoutMillis = getProperty(request, TIMEOUT, timeoutMillisParser).orElse(defaultTimeout.toMillis()); return connectedAtMillis + requestTimeoutMillis; }
long connectedAtMillis = request.getConnectedAt(MILLISECONDS);
private static long doomMillis(HttpRequest request) { long createdAtMillis = request.creationTime(MILLISECONDS); long requestTimeoutMillis = getProperty(request, TIMEOUT, timeoutMillisParser).orElse(defaultTimeout.toMillis()); return createdAtMillis + requestTimeoutMillis; }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
... well, if the connection had several requests, I guess that could explain the difference.
private static long doomMillis(HttpRequest request) { long connectedAtMillis = request.getConnectedAt(MILLISECONDS); long requestTimeoutMillis = getProperty(request, TIMEOUT, timeoutMillisParser).orElse(defaultTimeout.toMillis()); return connectedAtMillis + requestTimeoutMillis; }
long connectedAtMillis = request.getConnectedAt(MILLISECONDS);
private static long doomMillis(HttpRequest request) { long createdAtMillis = request.creationTime(MILLISECONDS); long requestTimeoutMillis = getProperty(request, TIMEOUT, timeoutMillisParser).orElse(defaultTimeout.toMillis()); return createdAtMillis + requestTimeoutMillis; }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
class DocumentOperationParser { private final DocumentTypeManager manager; DocumentOperationParser(DocumentmanagerConfig config) { this.manager = new DocumentTypeManager(config); } ParsedDocumentOperation parsePut(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.PUT); } ParsedDocumentOperation parseUpdate(InputStream inputStream, String docId) { return parse(inputStream, docId, DocumentOperationType.UPDATE); } private ParsedDocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) { try { return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId); } catch (IllegalArgumentException e) { incrementMetricParseError(); throw e; } } }
Thread-safe to mutate shared request config builder?
public void dispatch(HttpRequest wrapped, CompletableFuture<HttpResponse> vessel) { Endpoint leastBusy = endpoints.get(0); int min = Integer.MAX_VALUE; int start = ++someNumber % endpoints.size(); for (int i = 0; i < endpoints.size(); i++) { Endpoint endpoint = endpoints.get((i + start) % endpoints.size()); int inflight = endpoint.inflight.get(); if (inflight < min) { leastBusy = endpoint; min = inflight; } } Endpoint endpoint = leastBusy; endpoint.inflight.incrementAndGet(); dispatchExecutor.execute(() -> { try { SimpleHttpRequest request = new SimpleHttpRequest(wrapped.method(), wrapped.path()); request.setScheme(endpoint.url.getScheme()); request.setAuthority(new URIAuthority(endpoint.url.getHost(), portOf(endpoint.url))); long timeoutMillis = wrapped.timeout() == null ? 190_000 : wrapped.timeout().toMillis() * 11 / 10 + 1_000; request.setConfig(requestConfig.setResponseTimeout(Timeout.ofMilliseconds(timeoutMillis)).build()); defaultHeaders.forEach(request::setHeader); wrapped.headers().forEach((name, value) -> request.setHeader(name, value.get())); if (wrapped.body() != null) { byte[] body = wrapped.body(); if (compression == gzip || compression == auto && body.length > 512) { request.setHeader(gzipEncodingHeader); body = gzipped(body); } request.setBody(body, ContentType.APPLICATION_JSON); } Future<?> future = endpoint.client.execute(request, new FutureCallback<SimpleHttpResponse>() { @Override public void completed(SimpleHttpResponse response) { vessel.complete(new ApacheHttpResponse(response)); } @Override public void failed(Exception ex) { vessel.completeExceptionally(ex); } @Override public void cancelled() { vessel.cancel(false); } }); Future<?> cancellation = timeoutExecutor.schedule(() -> { future.cancel(true); vessel.cancel(true); }, timeoutMillis + 10_000, TimeUnit.MILLISECONDS); vessel.whenComplete((__, ___) -> cancellation.cancel(true)); } catch (Throwable thrown) { vessel.completeExceptionally(thrown); } vessel.whenComplete((__, ___) -> 
endpoint.inflight.decrementAndGet()); }); }
request.setConfig(requestConfig.setResponseTimeout(Timeout.ofMilliseconds(timeoutMillis)).build());
public void dispatch(HttpRequest wrapped, CompletableFuture<HttpResponse> vessel) { Endpoint leastBusy = endpoints.get(0); int min = Integer.MAX_VALUE; int start = ++someNumber % endpoints.size(); for (int i = 0; i < endpoints.size(); i++) { Endpoint endpoint = endpoints.get((i + start) % endpoints.size()); int inflight = endpoint.inflight.get(); if (inflight < min) { leastBusy = endpoint; min = inflight; } } Endpoint endpoint = leastBusy; endpoint.inflight.incrementAndGet(); dispatchExecutor.execute(() -> { try { SimpleHttpRequest request = new SimpleHttpRequest(wrapped.method(), wrapped.path()); request.setScheme(endpoint.url.getScheme()); request.setAuthority(new URIAuthority(endpoint.url.getHost(), portOf(endpoint.url))); long timeoutMillis = wrapped.timeout() == null ? 190_000 : wrapped.timeout().toMillis() * 11 / 10 + 1_000; request.setConfig(RequestConfig.copy(requestConfig).setResponseTimeout(Timeout.ofMilliseconds(timeoutMillis)).build()); defaultHeaders.forEach(request::setHeader); wrapped.headers().forEach((name, value) -> request.setHeader(name, value.get())); if (wrapped.body() != null) { byte[] body = wrapped.body(); if (compression == gzip || compression == auto && body.length > 512) { request.setHeader(gzipEncodingHeader); body = gzipped(body); } request.setBody(body, ContentType.APPLICATION_JSON); } Future<?> future = endpoint.client.execute(request, new FutureCallback<SimpleHttpResponse>() { @Override public void completed(SimpleHttpResponse response) { vessel.complete(new ApacheHttpResponse(response)); } @Override public void failed(Exception ex) { vessel.completeExceptionally(ex); } @Override public void cancelled() { vessel.cancel(false); } }); Future<?> cancellation = timeoutExecutor.schedule(() -> { future.cancel(true); vessel.cancel(true); }, timeoutMillis + 10_000, TimeUnit.MILLISECONDS); vessel.whenComplete((__, ___) -> cancellation.cancel(true)); } catch (Throwable thrown) { vessel.completeExceptionally(thrown); } vessel.whenComplete((__, 
___) -> endpoint.inflight.decrementAndGet()); }); }
class ApacheCluster implements Cluster { private final List<Endpoint> endpoints = new ArrayList<>(); private final List<BasicHeader> defaultHeaders = Arrays.asList(new BasicHeader(HttpHeaders.USER_AGENT, String.format("vespa-feed-client/%s", Vespa.VERSION)), new BasicHeader("Vespa-Client-Version", Vespa.VERSION)); private final Header gzipEncodingHeader = new BasicHeader(HttpHeaders.CONTENT_ENCODING, "gzip"); private final RequestConfig.Builder requestConfig; private final Compression compression; private int someNumber = 0; private final ExecutorService dispatchExecutor = Executors.newFixedThreadPool(8, t -> new Thread(t, "request-dispatch-thread")); private final ScheduledExecutorService timeoutExecutor = Executors.newSingleThreadScheduledExecutor(t -> new Thread(t, "request-timeout-thread")); ApacheCluster(FeedClientBuilderImpl builder) throws IOException { for (int i = 0; i < builder.connectionsPerEndpoint; i++) for (URI endpoint : builder.endpoints) endpoints.add(new Endpoint(createHttpClient(builder), endpoint)); this.requestConfig = createRequestConfig(builder); this.compression = builder.compression; } @Override private byte[] gzipped(byte[] content) throws IOException{ ByteArrayOutputStream buffer = new ByteArrayOutputStream(1 << 10); try (GZIPOutputStream zip = new GZIPOutputStream(buffer)) { zip.write(content); } return buffer.toByteArray(); } @Override public void close() { Throwable thrown = null; dispatchExecutor.shutdownNow().forEach(Runnable::run); for (Endpoint endpoint : endpoints) { try { endpoint.client.close(); } catch (Throwable t) { if (thrown == null) thrown = t; else thrown.addSuppressed(t); } } timeoutExecutor.shutdownNow().forEach(Runnable::run); if (thrown != null) throw new RuntimeException(thrown); } private static class Endpoint { private final CloseableHttpAsyncClient client; private final AtomicInteger inflight = new AtomicInteger(0); private final URI url; private Endpoint(CloseableHttpAsyncClient client, URI url) { this.client = 
client; this.url = url; this.client.start(); } } @SuppressWarnings("deprecation") private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setTlsDetailsFactory(TlsDetailsFactory::create) .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); return HttpAsyncClients.createHttp2Minimal(H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(2) .setTcpNoDelay(true) .setSoTimeout(Timeout.ofSeconds(10)) .build(), tlsStrategyBuilder.build()); } private static int portOf(URI url) { return url.getPort() == -1 ? url.getScheme().equals("http") ? 
80 : 443 : url.getPort(); } @SuppressWarnings("deprecation") private static RequestConfig.Builder createRequestConfig(FeedClientBuilderImpl b) { RequestConfig.Builder builder = RequestConfig.custom() .setConnectTimeout(Timeout.ofSeconds(10)) .setConnectionRequestTimeout(Timeout.DISABLED); if (b.proxy != null) builder.setProxy(new HttpHost(b.proxy.getScheme(), b.proxy.getHost(), b.proxy.getPort())); return builder; } private static class ApacheHttpResponse implements HttpResponse { private final SimpleHttpResponse wrapped; private ApacheHttpResponse(SimpleHttpResponse wrapped) { this.wrapped = wrapped; } @Override public int code() { return wrapped.getCode(); } @Override public byte[] body() { return wrapped.getBodyBytes(); } @Override public String contentType() { return wrapped.getContentType().getMimeType(); } @Override public String toString() { return "HTTP response with code " + code() + (body() != null ? " and body '" + wrapped.getBodyText() + "'" : ""); } } }
class ApacheCluster implements Cluster { private final List<Endpoint> endpoints = new ArrayList<>(); private final List<BasicHeader> defaultHeaders = Arrays.asList(new BasicHeader(HttpHeaders.USER_AGENT, String.format("vespa-feed-client/%s", Vespa.VERSION)), new BasicHeader("Vespa-Client-Version", Vespa.VERSION)); private final Header gzipEncodingHeader = new BasicHeader(HttpHeaders.CONTENT_ENCODING, "gzip"); private final RequestConfig requestConfig; private final Compression compression; private int someNumber = 0; private final ExecutorService dispatchExecutor = Executors.newFixedThreadPool(8, t -> new Thread(t, "request-dispatch-thread")); private final ScheduledExecutorService timeoutExecutor = Executors.newSingleThreadScheduledExecutor(t -> new Thread(t, "request-timeout-thread")); ApacheCluster(FeedClientBuilderImpl builder) throws IOException { for (int i = 0; i < builder.connectionsPerEndpoint; i++) for (URI endpoint : builder.endpoints) endpoints.add(new Endpoint(createHttpClient(builder), endpoint)); this.requestConfig = createRequestConfig(builder); this.compression = builder.compression; } @Override private byte[] gzipped(byte[] content) throws IOException{ ByteArrayOutputStream buffer = new ByteArrayOutputStream(1 << 10); try (GZIPOutputStream zip = new GZIPOutputStream(buffer)) { zip.write(content); } return buffer.toByteArray(); } @Override public void close() { Throwable thrown = null; dispatchExecutor.shutdownNow().forEach(Runnable::run); for (Endpoint endpoint : endpoints) { try { endpoint.client.close(); } catch (Throwable t) { if (thrown == null) thrown = t; else thrown.addSuppressed(t); } } timeoutExecutor.shutdownNow().forEach(Runnable::run); if (thrown != null) throw new RuntimeException(thrown); } private static class Endpoint { private final CloseableHttpAsyncClient client; private final AtomicInteger inflight = new AtomicInteger(0); private final URI url; private Endpoint(CloseableHttpAsyncClient client, URI url) { this.client = client; 
this.url = url; this.client.start(); } } @SuppressWarnings("deprecation") private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setTlsDetailsFactory(TlsDetailsFactory::create) .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); return HttpAsyncClients.createHttp2Minimal(H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(2) .setTcpNoDelay(true) .setSoTimeout(Timeout.ofSeconds(10)) .build(), tlsStrategyBuilder.build()); } private static int portOf(URI url) { return url.getPort() == -1 ? url.getScheme().equals("http") ? 
80 : 443 : url.getPort(); } @SuppressWarnings("deprecation") private static RequestConfig createRequestConfig(FeedClientBuilderImpl b) { RequestConfig.Builder builder = RequestConfig.custom() .setConnectTimeout(Timeout.ofSeconds(10)) .setConnectionRequestTimeout(Timeout.DISABLED); if (b.proxy != null) builder.setProxy(new HttpHost(b.proxy.getScheme(), b.proxy.getHost(), b.proxy.getPort())); return builder.build(); } private static class ApacheHttpResponse implements HttpResponse { private final SimpleHttpResponse wrapped; private ApacheHttpResponse(SimpleHttpResponse wrapped) { this.wrapped = wrapped; } @Override public int code() { return wrapped.getCode(); } @Override public byte[] body() { return wrapped.getBodyBytes(); } @Override public String contentType() { return wrapped.getContentType().getMimeType(); } @Override public String toString() { return "HTTP response with code " + code() + (body() != null ? " and body '" + wrapped.getBodyText() + "'" : ""); } } }
Right ... this used to be single-threaded dispatch :)
public void dispatch(HttpRequest wrapped, CompletableFuture<HttpResponse> vessel) { Endpoint leastBusy = endpoints.get(0); int min = Integer.MAX_VALUE; int start = ++someNumber % endpoints.size(); for (int i = 0; i < endpoints.size(); i++) { Endpoint endpoint = endpoints.get((i + start) % endpoints.size()); int inflight = endpoint.inflight.get(); if (inflight < min) { leastBusy = endpoint; min = inflight; } } Endpoint endpoint = leastBusy; endpoint.inflight.incrementAndGet(); dispatchExecutor.execute(() -> { try { SimpleHttpRequest request = new SimpleHttpRequest(wrapped.method(), wrapped.path()); request.setScheme(endpoint.url.getScheme()); request.setAuthority(new URIAuthority(endpoint.url.getHost(), portOf(endpoint.url))); long timeoutMillis = wrapped.timeout() == null ? 190_000 : wrapped.timeout().toMillis() * 11 / 10 + 1_000; request.setConfig(requestConfig.setResponseTimeout(Timeout.ofMilliseconds(timeoutMillis)).build()); defaultHeaders.forEach(request::setHeader); wrapped.headers().forEach((name, value) -> request.setHeader(name, value.get())); if (wrapped.body() != null) { byte[] body = wrapped.body(); if (compression == gzip || compression == auto && body.length > 512) { request.setHeader(gzipEncodingHeader); body = gzipped(body); } request.setBody(body, ContentType.APPLICATION_JSON); } Future<?> future = endpoint.client.execute(request, new FutureCallback<SimpleHttpResponse>() { @Override public void completed(SimpleHttpResponse response) { vessel.complete(new ApacheHttpResponse(response)); } @Override public void failed(Exception ex) { vessel.completeExceptionally(ex); } @Override public void cancelled() { vessel.cancel(false); } }); Future<?> cancellation = timeoutExecutor.schedule(() -> { future.cancel(true); vessel.cancel(true); }, timeoutMillis + 10_000, TimeUnit.MILLISECONDS); vessel.whenComplete((__, ___) -> cancellation.cancel(true)); } catch (Throwable thrown) { vessel.completeExceptionally(thrown); } vessel.whenComplete((__, ___) -> 
endpoint.inflight.decrementAndGet()); }); }
request.setConfig(requestConfig.setResponseTimeout(Timeout.ofMilliseconds(timeoutMillis)).build());
public void dispatch(HttpRequest wrapped, CompletableFuture<HttpResponse> vessel) { Endpoint leastBusy = endpoints.get(0); int min = Integer.MAX_VALUE; int start = ++someNumber % endpoints.size(); for (int i = 0; i < endpoints.size(); i++) { Endpoint endpoint = endpoints.get((i + start) % endpoints.size()); int inflight = endpoint.inflight.get(); if (inflight < min) { leastBusy = endpoint; min = inflight; } } Endpoint endpoint = leastBusy; endpoint.inflight.incrementAndGet(); dispatchExecutor.execute(() -> { try { SimpleHttpRequest request = new SimpleHttpRequest(wrapped.method(), wrapped.path()); request.setScheme(endpoint.url.getScheme()); request.setAuthority(new URIAuthority(endpoint.url.getHost(), portOf(endpoint.url))); long timeoutMillis = wrapped.timeout() == null ? 190_000 : wrapped.timeout().toMillis() * 11 / 10 + 1_000; request.setConfig(RequestConfig.copy(requestConfig).setResponseTimeout(Timeout.ofMilliseconds(timeoutMillis)).build()); defaultHeaders.forEach(request::setHeader); wrapped.headers().forEach((name, value) -> request.setHeader(name, value.get())); if (wrapped.body() != null) { byte[] body = wrapped.body(); if (compression == gzip || compression == auto && body.length > 512) { request.setHeader(gzipEncodingHeader); body = gzipped(body); } request.setBody(body, ContentType.APPLICATION_JSON); } Future<?> future = endpoint.client.execute(request, new FutureCallback<SimpleHttpResponse>() { @Override public void completed(SimpleHttpResponse response) { vessel.complete(new ApacheHttpResponse(response)); } @Override public void failed(Exception ex) { vessel.completeExceptionally(ex); } @Override public void cancelled() { vessel.cancel(false); } }); Future<?> cancellation = timeoutExecutor.schedule(() -> { future.cancel(true); vessel.cancel(true); }, timeoutMillis + 10_000, TimeUnit.MILLISECONDS); vessel.whenComplete((__, ___) -> cancellation.cancel(true)); } catch (Throwable thrown) { vessel.completeExceptionally(thrown); } vessel.whenComplete((__, 
___) -> endpoint.inflight.decrementAndGet()); }); }
class ApacheCluster implements Cluster { private final List<Endpoint> endpoints = new ArrayList<>(); private final List<BasicHeader> defaultHeaders = Arrays.asList(new BasicHeader(HttpHeaders.USER_AGENT, String.format("vespa-feed-client/%s", Vespa.VERSION)), new BasicHeader("Vespa-Client-Version", Vespa.VERSION)); private final Header gzipEncodingHeader = new BasicHeader(HttpHeaders.CONTENT_ENCODING, "gzip"); private final RequestConfig.Builder requestConfig; private final Compression compression; private int someNumber = 0; private final ExecutorService dispatchExecutor = Executors.newFixedThreadPool(8, t -> new Thread(t, "request-dispatch-thread")); private final ScheduledExecutorService timeoutExecutor = Executors.newSingleThreadScheduledExecutor(t -> new Thread(t, "request-timeout-thread")); ApacheCluster(FeedClientBuilderImpl builder) throws IOException { for (int i = 0; i < builder.connectionsPerEndpoint; i++) for (URI endpoint : builder.endpoints) endpoints.add(new Endpoint(createHttpClient(builder), endpoint)); this.requestConfig = createRequestConfig(builder); this.compression = builder.compression; } @Override private byte[] gzipped(byte[] content) throws IOException{ ByteArrayOutputStream buffer = new ByteArrayOutputStream(1 << 10); try (GZIPOutputStream zip = new GZIPOutputStream(buffer)) { zip.write(content); } return buffer.toByteArray(); } @Override public void close() { Throwable thrown = null; dispatchExecutor.shutdownNow().forEach(Runnable::run); for (Endpoint endpoint : endpoints) { try { endpoint.client.close(); } catch (Throwable t) { if (thrown == null) thrown = t; else thrown.addSuppressed(t); } } timeoutExecutor.shutdownNow().forEach(Runnable::run); if (thrown != null) throw new RuntimeException(thrown); } private static class Endpoint { private final CloseableHttpAsyncClient client; private final AtomicInteger inflight = new AtomicInteger(0); private final URI url; private Endpoint(CloseableHttpAsyncClient client, URI url) { this.client = 
client; this.url = url; this.client.start(); } } @SuppressWarnings("deprecation") private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setTlsDetailsFactory(TlsDetailsFactory::create) .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); return HttpAsyncClients.createHttp2Minimal(H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(2) .setTcpNoDelay(true) .setSoTimeout(Timeout.ofSeconds(10)) .build(), tlsStrategyBuilder.build()); } private static int portOf(URI url) { return url.getPort() == -1 ? url.getScheme().equals("http") ? 
80 : 443 : url.getPort(); } @SuppressWarnings("deprecation") private static RequestConfig.Builder createRequestConfig(FeedClientBuilderImpl b) { RequestConfig.Builder builder = RequestConfig.custom() .setConnectTimeout(Timeout.ofSeconds(10)) .setConnectionRequestTimeout(Timeout.DISABLED); if (b.proxy != null) builder.setProxy(new HttpHost(b.proxy.getScheme(), b.proxy.getHost(), b.proxy.getPort())); return builder; } private static class ApacheHttpResponse implements HttpResponse { private final SimpleHttpResponse wrapped; private ApacheHttpResponse(SimpleHttpResponse wrapped) { this.wrapped = wrapped; } @Override public int code() { return wrapped.getCode(); } @Override public byte[] body() { return wrapped.getBodyBytes(); } @Override public String contentType() { return wrapped.getContentType().getMimeType(); } @Override public String toString() { return "HTTP response with code " + code() + (body() != null ? " and body '" + wrapped.getBodyText() + "'" : ""); } } }
class ApacheCluster implements Cluster { private final List<Endpoint> endpoints = new ArrayList<>(); private final List<BasicHeader> defaultHeaders = Arrays.asList(new BasicHeader(HttpHeaders.USER_AGENT, String.format("vespa-feed-client/%s", Vespa.VERSION)), new BasicHeader("Vespa-Client-Version", Vespa.VERSION)); private final Header gzipEncodingHeader = new BasicHeader(HttpHeaders.CONTENT_ENCODING, "gzip"); private final RequestConfig requestConfig; private final Compression compression; private int someNumber = 0; private final ExecutorService dispatchExecutor = Executors.newFixedThreadPool(8, t -> new Thread(t, "request-dispatch-thread")); private final ScheduledExecutorService timeoutExecutor = Executors.newSingleThreadScheduledExecutor(t -> new Thread(t, "request-timeout-thread")); ApacheCluster(FeedClientBuilderImpl builder) throws IOException { for (int i = 0; i < builder.connectionsPerEndpoint; i++) for (URI endpoint : builder.endpoints) endpoints.add(new Endpoint(createHttpClient(builder), endpoint)); this.requestConfig = createRequestConfig(builder); this.compression = builder.compression; } @Override private byte[] gzipped(byte[] content) throws IOException{ ByteArrayOutputStream buffer = new ByteArrayOutputStream(1 << 10); try (GZIPOutputStream zip = new GZIPOutputStream(buffer)) { zip.write(content); } return buffer.toByteArray(); } @Override public void close() { Throwable thrown = null; dispatchExecutor.shutdownNow().forEach(Runnable::run); for (Endpoint endpoint : endpoints) { try { endpoint.client.close(); } catch (Throwable t) { if (thrown == null) thrown = t; else thrown.addSuppressed(t); } } timeoutExecutor.shutdownNow().forEach(Runnable::run); if (thrown != null) throw new RuntimeException(thrown); } private static class Endpoint { private final CloseableHttpAsyncClient client; private final AtomicInteger inflight = new AtomicInteger(0); private final URI url; private Endpoint(CloseableHttpAsyncClient client, URI url) { this.client = client; 
this.url = url; this.client.start(); } } @SuppressWarnings("deprecation") private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setTlsDetailsFactory(TlsDetailsFactory::create) .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); return HttpAsyncClients.createHttp2Minimal(H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(2) .setTcpNoDelay(true) .setSoTimeout(Timeout.ofSeconds(10)) .build(), tlsStrategyBuilder.build()); } private static int portOf(URI url) { return url.getPort() == -1 ? url.getScheme().equals("http") ? 
80 : 443 : url.getPort(); } @SuppressWarnings("deprecation") private static RequestConfig createRequestConfig(FeedClientBuilderImpl b) { RequestConfig.Builder builder = RequestConfig.custom() .setConnectTimeout(Timeout.ofSeconds(10)) .setConnectionRequestTimeout(Timeout.DISABLED); if (b.proxy != null) builder.setProxy(new HttpHost(b.proxy.getScheme(), b.proxy.getHost(), b.proxy.getPort())); return builder.build(); } private static class ApacheHttpResponse implements HttpResponse { private final SimpleHttpResponse wrapped; private ApacheHttpResponse(SimpleHttpResponse wrapped) { this.wrapped = wrapped; } @Override public int code() { return wrapped.getCode(); } @Override public byte[] body() { return wrapped.getBodyBytes(); } @Override public String contentType() { return wrapped.getContentType().getMimeType(); } @Override public String toString() { return "HTTP response with code " + code() + (body() != null ? " and body '" + wrapped.getBodyText() + "'" : ""); } } }
Bug: because `||` short-circuits, this returns as soon as the first file is actually deleted, leaving the private key and certificate behind; use the non-short-circuiting `|` so all three credential files are always removed.
private boolean deleteTenantCredentials(NodeAgentContext context) { var siaDirectory = context.paths().of(CONTAINER_SIA_DIRECTORY, context.users().vespa()); var identityDocumentFile = siaDirectory.resolve(TENANT.getIdentityDocument()); if (!Files.exists(identityDocumentFile)) return false; return getAthenzIdentity(context, TENANT, identityDocumentFile).map(athenzIdentity -> { var privateKeyFile = (ContainerPath) SiaUtils.getPrivateKeyFile(siaDirectory, athenzIdentity); var certificateFile = (ContainerPath) SiaUtils.getCertificateFile(siaDirectory, athenzIdentity); try { return Files.deleteIfExists(identityDocumentFile) || Files.deleteIfExists(privateKeyFile) || Files.deleteIfExists(certificateFile); } catch (IOException e) { throw new UncheckedIOException(e); } }).orElse(false); }
return Files.deleteIfExists(identityDocumentFile) ||
private boolean deleteTenantCredentials(NodeAgentContext context) { var siaDirectory = context.paths().of(CONTAINER_SIA_DIRECTORY, context.users().vespa()); var identityDocumentFile = siaDirectory.resolve(TENANT.getIdentityDocument()); if (!Files.exists(identityDocumentFile)) return false; return getAthenzIdentity(context, TENANT, identityDocumentFile).map(athenzIdentity -> { var privateKeyFile = (ContainerPath) SiaUtils.getPrivateKeyFile(siaDirectory, athenzIdentity); var certificateFile = (ContainerPath) SiaUtils.getCertificateFile(siaDirectory, athenzIdentity); try { return Files.deleteIfExists(identityDocumentFile) || Files.deleteIfExists(privateKeyFile) || Files.deleteIfExists(certificateFile); } catch (IOException e) { throw new UncheckedIOException(e); } }).orElse(false); }
class AthenzCredentialsMaintainer implements CredentialsMaintainer { private static final Logger logger = Logger.getLogger(AthenzCredentialsMaintainer.class.getName()); private static final Duration EXPIRY_MARGIN = Duration.ofDays(1); private static final Duration REFRESH_PERIOD = Duration.ofDays(1); private static final Duration REFRESH_BACKOFF = Duration.ofHours(1); private static final String CONTAINER_SIA_DIRECTORY = "/var/lib/sia"; private final URI ztsEndpoint; private final Path ztsTrustStorePath; private final Clock clock; private final String certificateDnsSuffix; private final ServiceIdentityProvider hostIdentityProvider; private final IdentityDocumentClient identityDocumentClient; private final BooleanFlag tenantServiceIdentityFlag; private final BooleanFlag useNewIdentityDocumentLayout; private final Map<ContainerName, Instant> lastRefreshAttempt = new ConcurrentHashMap<>(); public AthenzCredentialsMaintainer(URI ztsEndpoint, Path ztsTrustStorePath, ConfigServerInfo configServerInfo, String certificateDnsSuffix, ServiceIdentityProvider hostIdentityProvider, FlagSource flagSource, Clock clock) { this.ztsEndpoint = ztsEndpoint; this.ztsTrustStorePath = ztsTrustStorePath; this.certificateDnsSuffix = certificateDnsSuffix; this.hostIdentityProvider = hostIdentityProvider; this.identityDocumentClient = new DefaultIdentityDocumentClient( configServerInfo.getLoadBalancerEndpoint(), hostIdentityProvider, new AthenzIdentityVerifier(Set.of(configServerInfo.getConfigServerIdentity()))); this.clock = clock; this.tenantServiceIdentityFlag = Flags.NODE_ADMIN_TENANT_SERVICE_REGISTRY.bindTo(flagSource); this.useNewIdentityDocumentLayout = Flags.NEW_IDDOC_LAYOUT.bindTo(flagSource); } public boolean converge(NodeAgentContext context) { var modified = false; modified |= maintain(context, NODE); if (context.zone().getSystemName().isPublic()) return modified; if (shouldWriteTenantServiceIdentity(context)) modified |= maintain(context, TENANT); else modified |= 
deleteTenantCredentials(context); return modified; } private boolean maintain(NodeAgentContext context, IdentityType identityType) { if (context.isDisabled(NodeAgentTask.CredentialsMaintainer)) return false; try { context.log(logger, Level.FINE, "Checking certificate"); ContainerPath siaDirectory = context.paths().of(CONTAINER_SIA_DIRECTORY, context.users().vespa()); ContainerPath identityDocumentFile = siaDirectory.resolve(identityType.getIdentityDocument()); Optional<AthenzIdentity> optionalAthenzIdentity = getAthenzIdentity(context, identityType, identityDocumentFile); if (optionalAthenzIdentity.isEmpty()) return false; AthenzIdentity athenzIdentity = optionalAthenzIdentity.get(); ContainerPath privateKeyFile = (ContainerPath) SiaUtils.getPrivateKeyFile(siaDirectory, athenzIdentity); ContainerPath certificateFile = (ContainerPath) SiaUtils.getCertificateFile(siaDirectory, athenzIdentity); if (!Files.exists(privateKeyFile) || !Files.exists(certificateFile) || !Files.exists(identityDocumentFile)) { context.log(logger, "Certificate/private key/identity document file does not exist"); Files.createDirectories(privateKeyFile.getParent()); Files.createDirectories(certificateFile.getParent()); Files.createDirectories(identityDocumentFile.getParent()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, identityType, athenzIdentity); return true; } X509Certificate certificate = readCertificateFromFile(certificateFile); Instant now = clock.instant(); Instant expiry = certificate.getNotAfter().toInstant(); var doc = EntityBindingsMapper.readSignedIdentityDocumentFromFile(identityDocumentFile); if (refreshIdentityDocument(doc, context)) { context.log(logger, "Identity document is outdated (version=%d)", doc.documentVersion()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, identityType, athenzIdentity); return true; } else if (isCertificateExpired(expiry, now)) { context.log(logger, "Certificate has expired 
(expiry=%s)", expiry.toString()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, identityType, athenzIdentity); return true; } Duration age = Duration.between(certificate.getNotBefore().toInstant(), now); if (shouldRefreshCredentials(age)) { context.log(logger, "Certificate is ready to be refreshed (age=%s)", age.toString()); if (shouldThrottleRefreshAttempts(context.containerName(), now)) { context.log(logger, Level.WARNING, String.format( "Skipping refresh attempt as last refresh was on %s (less than %s ago)", lastRefreshAttempt.get(context.containerName()).toString(), REFRESH_BACKOFF.toString())); return false; } else { lastRefreshAttempt.put(context.containerName(), now); refreshIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, doc.identityDocument(), identityType, athenzIdentity); return true; } } context.log(logger, Level.FINE, "Certificate is still valid"); return false; } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean refreshIdentityDocument(SignedIdentityDocument signedIdentityDocument, NodeAgentContext context) { int expectedVersion = documentVersion(context); return signedIdentityDocument.outdated() || signedIdentityDocument.documentVersion() != expectedVersion; } public void clearCredentials(NodeAgentContext context) { FileFinder.files(context.paths().of(CONTAINER_SIA_DIRECTORY)) .deleteRecursively(context); lastRefreshAttempt.remove(context.containerName()); } @Override public Duration certificateLifetime(NodeAgentContext context) { ContainerPath containerSiaDirectory = context.paths().of(CONTAINER_SIA_DIRECTORY); ContainerPath certificateFile = (ContainerPath) SiaUtils.getCertificateFile(containerSiaDirectory, context.identity()); try { X509Certificate certificate = readCertificateFromFile(certificateFile); Instant now = clock.instant(); Instant expiry = certificate.getNotAfter().toInstant(); return Duration.between(now, expiry); } catch (IOException e) { 
context.log(logger, Level.SEVERE, "Unable to read certificate at " + certificateFile, e); return Duration.ZERO; } } @Override public String name() { return "node-certificate"; } private boolean shouldRefreshCredentials(Duration age) { return age.compareTo(REFRESH_PERIOD) >= 0; } private boolean shouldThrottleRefreshAttempts(ContainerName containerName, Instant now) { return REFRESH_BACKOFF.compareTo( Duration.between( lastRefreshAttempt.getOrDefault(containerName, Instant.EPOCH), now)) > 0; } private void registerIdentity(NodeAgentContext context, ContainerPath privateKeyFile, ContainerPath certificateFile, ContainerPath identityDocumentFile, IdentityType identityType, AthenzIdentity identity) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); SignedIdentityDocument signedDoc = signedIdentityDocument(context, identityType); IdentityDocument doc = signedDoc.identityDocument(); CsrGenerator csrGenerator = new CsrGenerator(certificateDnsSuffix, doc.providerService().getFullName()); Pkcs10Csr csr = csrGenerator.generateInstanceCsr( identity, doc.providerUniqueId(), doc.ipAddresses(), doc.clusterType(), keyPair); HostnameVerifier ztsHostNameVerifier = (hostname, sslSession) -> true; try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(doc)).withIdentityProvider(hostIdentityProvider).withHostnameVerifier(ztsHostNameVerifier).build()) { InstanceIdentity instanceIdentity = ztsClient.registerInstance( doc.providerService(), identity, EntityBindingsMapper.toAttestationData(signedDoc), csr); EntityBindingsMapper.writeSignedIdentityDocumentToFile(identityDocumentFile, signedDoc); writePrivateKeyAndCertificate(privateKeyFile, keyPair.getPrivate(), certificateFile, instanceIdentity.certificate()); context.log(logger, "Instance successfully registered and credentials written to file"); } } /** * Return zts url from identity document, fallback to ztsEndpoint */ private URI ztsEndpoint(IdentityDocument doc) { return Optional.ofNullable(doc.ztsUrl()) 
.filter(s -> !s.isBlank()) .map(URI::create) .orElse(ztsEndpoint); } private void refreshIdentity(NodeAgentContext context, ContainerPath privateKeyFile, ContainerPath certificateFile, ContainerPath identityDocumentFile, IdentityDocument doc, IdentityType identityType, AthenzIdentity identity) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); CsrGenerator csrGenerator = new CsrGenerator(certificateDnsSuffix, doc.providerService().getFullName()); Pkcs10Csr csr = csrGenerator.generateInstanceCsr( identity, doc.providerUniqueId(), doc.ipAddresses(), doc.clusterType(), keyPair); SSLContext containerIdentitySslContext = new SslContextBuilder().withKeyStore(privateKeyFile, certificateFile) .withTrustStore(ztsTrustStorePath) .build(); try { HostnameVerifier ztsHostNameVerifier = (hostname, sslSession) -> true; try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(doc)).withSslContext(containerIdentitySslContext).withHostnameVerifier(ztsHostNameVerifier).build()) { InstanceIdentity instanceIdentity = ztsClient.refreshInstance( doc.providerService(), identity, doc.providerUniqueId().asDottedString(), csr); writePrivateKeyAndCertificate(privateKeyFile, keyPair.getPrivate(), certificateFile, instanceIdentity.certificate()); context.log(logger, "Instance successfully refreshed and credentials written to file"); } catch (ZtsClientException e) { if (e.getErrorCode() == 403 && e.getDescription().startsWith("Certificate revoked")) { context.log(logger, Level.SEVERE, "Certificate cannot be refreshed as it is revoked by ZTS - re-registering the instance now", e); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, identityType, identity); } else { throw e; } } } catch (Exception e) { context.log(logger, Level.SEVERE, "Certificate refresh failed: " + e.getMessage(), e); } } private static void writePrivateKeyAndCertificate(ContainerPath privateKeyFile, PrivateKey privateKey, ContainerPath certificateFile, X509Certificate 
certificate) { writeFile(privateKeyFile, KeyUtils.toPem(privateKey)); writeFile(certificateFile, X509CertificateUtils.toPem(certificate)); } private static void writeFile(ContainerPath path, String utf8Content) { new UnixPath(path.resolveSibling(path.getFileName() + ".tmp")) .writeUtf8File(utf8Content, "r--------") .atomicMove(path); } private static X509Certificate readCertificateFromFile(ContainerPath certificateFile) throws IOException { String pemEncodedCertificate = new String(Files.readAllBytes(certificateFile)); return X509CertificateUtils.fromPem(pemEncodedCertificate); } private static boolean isCertificateExpired(Instant expiry, Instant now) { return now.isAfter(expiry.minus(EXPIRY_MARGIN)); } private SignedIdentityDocument signedIdentityDocument(NodeAgentContext context, IdentityType identityType) { return switch (identityType) { case NODE -> identityDocumentClient.getNodeIdentityDocument(context.hostname().value(), documentVersion(context)); case TENANT -> identityDocumentClient.getTenantIdentityDocument(context.hostname().value(), documentVersion(context)).get(); }; } private Optional<AthenzIdentity> getAthenzIdentity(NodeAgentContext context, IdentityType identityType, ContainerPath identityDocumentFile) { return switch (identityType) { case NODE -> Optional.of(context.identity()); case TENANT -> getTenantIdentity(context, identityDocumentFile); }; } private Optional<AthenzIdentity> getTenantIdentity(NodeAgentContext context, ContainerPath identityDocumentFile) { if (Files.exists(identityDocumentFile)) { return Optional.of(EntityBindingsMapper.readSignedIdentityDocumentFromFile(identityDocumentFile).identityDocument().serviceIdentity()); } else { return identityDocumentClient.getTenantIdentityDocument(context.hostname().value(), documentVersion(context)) .map(doc -> doc.identityDocument().serviceIdentity()); } } private boolean shouldWriteTenantServiceIdentity(NodeAgentContext context) { var version = context.node().currentVespaVersion() 
.orElse(context.node().wantedVespaVersion().orElse(Version.emptyVersion)); var appId = context.node().owner().orElse(ApplicationId.defaultId()); return tenantServiceIdentityFlag .with(FetchVector.Dimension.VESPA_VERSION, version.toFullString()) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .value(); } /* Get the document version to ask for */ private int documentVersion(NodeAgentContext context) { return useNewIdentityDocumentLayout .with(FetchVector.Dimension.HOSTNAME, context.hostname().value()) .value() ? SignedIdentityDocument.DEFAULT_DOCUMENT_VERSION : SignedIdentityDocument.LEGACY_DEFAULT_DOCUMENT_VERSION; } enum IdentityType { NODE("vespa-node-identity-document.json"), TENANT("vespa-tenant-identity-document.json"); private String identityDocument; IdentityType(String identityDocument) { this.identityDocument = identityDocument; } public String getIdentityDocument() { return identityDocument; } } }
class AthenzCredentialsMaintainer implements CredentialsMaintainer { private static final Logger logger = Logger.getLogger(AthenzCredentialsMaintainer.class.getName()); private static final Duration EXPIRY_MARGIN = Duration.ofDays(1); private static final Duration REFRESH_PERIOD = Duration.ofDays(1); private static final Duration REFRESH_BACKOFF = Duration.ofHours(1); private static final String CONTAINER_SIA_DIRECTORY = "/var/lib/sia"; private final URI ztsEndpoint; private final Path ztsTrustStorePath; private final Clock clock; private final String certificateDnsSuffix; private final ServiceIdentityProvider hostIdentityProvider; private final IdentityDocumentClient identityDocumentClient; private final BooleanFlag tenantServiceIdentityFlag; private final BooleanFlag useNewIdentityDocumentLayout; private final Map<ContainerName, Instant> lastRefreshAttempt = new ConcurrentHashMap<>(); public AthenzCredentialsMaintainer(URI ztsEndpoint, Path ztsTrustStorePath, ConfigServerInfo configServerInfo, String certificateDnsSuffix, ServiceIdentityProvider hostIdentityProvider, FlagSource flagSource, Clock clock) { this.ztsEndpoint = ztsEndpoint; this.ztsTrustStorePath = ztsTrustStorePath; this.certificateDnsSuffix = certificateDnsSuffix; this.hostIdentityProvider = hostIdentityProvider; this.identityDocumentClient = new DefaultIdentityDocumentClient( configServerInfo.getLoadBalancerEndpoint(), hostIdentityProvider, new AthenzIdentityVerifier(Set.of(configServerInfo.getConfigServerIdentity()))); this.clock = clock; this.tenantServiceIdentityFlag = Flags.NODE_ADMIN_TENANT_SERVICE_REGISTRY.bindTo(flagSource); this.useNewIdentityDocumentLayout = Flags.NEW_IDDOC_LAYOUT.bindTo(flagSource); } public boolean converge(NodeAgentContext context) { var modified = false; modified |= maintain(context, NODE); if (context.zone().getSystemName().isPublic()) return modified; if (shouldWriteTenantServiceIdentity(context)) modified |= maintain(context, TENANT); else modified |= 
deleteTenantCredentials(context); return modified; } private boolean maintain(NodeAgentContext context, IdentityType identityType) { if (context.isDisabled(NodeAgentTask.CredentialsMaintainer)) return false; try { context.log(logger, Level.FINE, "Checking certificate"); ContainerPath siaDirectory = context.paths().of(CONTAINER_SIA_DIRECTORY, context.users().vespa()); ContainerPath identityDocumentFile = siaDirectory.resolve(identityType.getIdentityDocument()); Optional<AthenzIdentity> optionalAthenzIdentity = getAthenzIdentity(context, identityType, identityDocumentFile); if (optionalAthenzIdentity.isEmpty()) return false; AthenzIdentity athenzIdentity = optionalAthenzIdentity.get(); ContainerPath privateKeyFile = (ContainerPath) SiaUtils.getPrivateKeyFile(siaDirectory, athenzIdentity); ContainerPath certificateFile = (ContainerPath) SiaUtils.getCertificateFile(siaDirectory, athenzIdentity); if (!Files.exists(privateKeyFile) || !Files.exists(certificateFile) || !Files.exists(identityDocumentFile)) { context.log(logger, "Certificate/private key/identity document file does not exist"); Files.createDirectories(privateKeyFile.getParent()); Files.createDirectories(certificateFile.getParent()); Files.createDirectories(identityDocumentFile.getParent()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, identityType, athenzIdentity); return true; } X509Certificate certificate = readCertificateFromFile(certificateFile); Instant now = clock.instant(); Instant expiry = certificate.getNotAfter().toInstant(); var doc = EntityBindingsMapper.readSignedIdentityDocumentFromFile(identityDocumentFile); if (refreshIdentityDocument(doc, context)) { context.log(logger, "Identity document is outdated (version=%d)", doc.documentVersion()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, identityType, athenzIdentity); return true; } else if (isCertificateExpired(expiry, now)) { context.log(logger, "Certificate has expired 
(expiry=%s)", expiry.toString()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, identityType, athenzIdentity); return true; } Duration age = Duration.between(certificate.getNotBefore().toInstant(), now); if (shouldRefreshCredentials(age)) { context.log(logger, "Certificate is ready to be refreshed (age=%s)", age.toString()); if (shouldThrottleRefreshAttempts(context.containerName(), now)) { context.log(logger, Level.WARNING, String.format( "Skipping refresh attempt as last refresh was on %s (less than %s ago)", lastRefreshAttempt.get(context.containerName()).toString(), REFRESH_BACKOFF.toString())); return false; } else { lastRefreshAttempt.put(context.containerName(), now); refreshIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, doc.identityDocument(), identityType, athenzIdentity); return true; } } context.log(logger, Level.FINE, "Certificate is still valid"); return false; } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean refreshIdentityDocument(SignedIdentityDocument signedIdentityDocument, NodeAgentContext context) { int expectedVersion = documentVersion(context); return signedIdentityDocument.outdated() || signedIdentityDocument.documentVersion() != expectedVersion; } public void clearCredentials(NodeAgentContext context) { FileFinder.files(context.paths().of(CONTAINER_SIA_DIRECTORY)) .deleteRecursively(context); lastRefreshAttempt.remove(context.containerName()); } @Override public Duration certificateLifetime(NodeAgentContext context) { ContainerPath containerSiaDirectory = context.paths().of(CONTAINER_SIA_DIRECTORY); ContainerPath certificateFile = (ContainerPath) SiaUtils.getCertificateFile(containerSiaDirectory, context.identity()); try { X509Certificate certificate = readCertificateFromFile(certificateFile); Instant now = clock.instant(); Instant expiry = certificate.getNotAfter().toInstant(); return Duration.between(now, expiry); } catch (IOException e) { 
context.log(logger, Level.SEVERE, "Unable to read certificate at " + certificateFile, e); return Duration.ZERO; } } @Override public String name() { return "node-certificate"; } private boolean shouldRefreshCredentials(Duration age) { return age.compareTo(REFRESH_PERIOD) >= 0; } private boolean shouldThrottleRefreshAttempts(ContainerName containerName, Instant now) { return REFRESH_BACKOFF.compareTo( Duration.between( lastRefreshAttempt.getOrDefault(containerName, Instant.EPOCH), now)) > 0; } private void registerIdentity(NodeAgentContext context, ContainerPath privateKeyFile, ContainerPath certificateFile, ContainerPath identityDocumentFile, IdentityType identityType, AthenzIdentity identity) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); SignedIdentityDocument signedDoc = signedIdentityDocument(context, identityType); IdentityDocument doc = signedDoc.identityDocument(); CsrGenerator csrGenerator = new CsrGenerator(certificateDnsSuffix, doc.providerService().getFullName()); Pkcs10Csr csr = csrGenerator.generateInstanceCsr( identity, doc.providerUniqueId(), doc.ipAddresses(), doc.clusterType(), keyPair); HostnameVerifier ztsHostNameVerifier = (hostname, sslSession) -> true; try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(doc)).withIdentityProvider(hostIdentityProvider).withHostnameVerifier(ztsHostNameVerifier).build()) { InstanceIdentity instanceIdentity = ztsClient.registerInstance( doc.providerService(), identity, EntityBindingsMapper.toAttestationData(signedDoc), csr); EntityBindingsMapper.writeSignedIdentityDocumentToFile(identityDocumentFile, signedDoc); writePrivateKeyAndCertificate(privateKeyFile, keyPair.getPrivate(), certificateFile, instanceIdentity.certificate()); context.log(logger, "Instance successfully registered and credentials written to file"); } } /** * Return zts url from identity document, fallback to ztsEndpoint */ private URI ztsEndpoint(IdentityDocument doc) { return Optional.ofNullable(doc.ztsUrl()) 
.filter(s -> !s.isBlank()) .map(URI::create) .orElse(ztsEndpoint); } private void refreshIdentity(NodeAgentContext context, ContainerPath privateKeyFile, ContainerPath certificateFile, ContainerPath identityDocumentFile, IdentityDocument doc, IdentityType identityType, AthenzIdentity identity) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); CsrGenerator csrGenerator = new CsrGenerator(certificateDnsSuffix, doc.providerService().getFullName()); Pkcs10Csr csr = csrGenerator.generateInstanceCsr( identity, doc.providerUniqueId(), doc.ipAddresses(), doc.clusterType(), keyPair); SSLContext containerIdentitySslContext = new SslContextBuilder().withKeyStore(privateKeyFile, certificateFile) .withTrustStore(ztsTrustStorePath) .build(); try { HostnameVerifier ztsHostNameVerifier = (hostname, sslSession) -> true; try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(doc)).withSslContext(containerIdentitySslContext).withHostnameVerifier(ztsHostNameVerifier).build()) { InstanceIdentity instanceIdentity = ztsClient.refreshInstance( doc.providerService(), identity, doc.providerUniqueId().asDottedString(), csr); writePrivateKeyAndCertificate(privateKeyFile, keyPair.getPrivate(), certificateFile, instanceIdentity.certificate()); context.log(logger, "Instance successfully refreshed and credentials written to file"); } catch (ZtsClientException e) { if (e.getErrorCode() == 403 && e.getDescription().startsWith("Certificate revoked")) { context.log(logger, Level.SEVERE, "Certificate cannot be refreshed as it is revoked by ZTS - re-registering the instance now", e); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile, identityType, identity); } else { throw e; } } } catch (Exception e) { context.log(logger, Level.SEVERE, "Certificate refresh failed: " + e.getMessage(), e); } } private static void writePrivateKeyAndCertificate(ContainerPath privateKeyFile, PrivateKey privateKey, ContainerPath certificateFile, X509Certificate 
certificate) { writeFile(privateKeyFile, KeyUtils.toPem(privateKey)); writeFile(certificateFile, X509CertificateUtils.toPem(certificate)); } private static void writeFile(ContainerPath path, String utf8Content) { new UnixPath(path.resolveSibling(path.getFileName() + ".tmp")) .writeUtf8File(utf8Content, "r--------") .atomicMove(path); } private static X509Certificate readCertificateFromFile(ContainerPath certificateFile) throws IOException { String pemEncodedCertificate = new String(Files.readAllBytes(certificateFile)); return X509CertificateUtils.fromPem(pemEncodedCertificate); } private static boolean isCertificateExpired(Instant expiry, Instant now) { return now.isAfter(expiry.minus(EXPIRY_MARGIN)); } private SignedIdentityDocument signedIdentityDocument(NodeAgentContext context, IdentityType identityType) { return switch (identityType) { case NODE -> identityDocumentClient.getNodeIdentityDocument(context.hostname().value(), documentVersion(context)); case TENANT -> identityDocumentClient.getTenantIdentityDocument(context.hostname().value(), documentVersion(context)).get(); }; } private Optional<AthenzIdentity> getAthenzIdentity(NodeAgentContext context, IdentityType identityType, ContainerPath identityDocumentFile) { return switch (identityType) { case NODE -> Optional.of(context.identity()); case TENANT -> getTenantIdentity(context, identityDocumentFile); }; } private Optional<AthenzIdentity> getTenantIdentity(NodeAgentContext context, ContainerPath identityDocumentFile) { if (Files.exists(identityDocumentFile)) { return Optional.of(EntityBindingsMapper.readSignedIdentityDocumentFromFile(identityDocumentFile).identityDocument().serviceIdentity()); } else { return identityDocumentClient.getTenantIdentityDocument(context.hostname().value(), documentVersion(context)) .map(doc -> doc.identityDocument().serviceIdentity()); } } private boolean shouldWriteTenantServiceIdentity(NodeAgentContext context) { var version = context.node().currentVespaVersion() 
.orElse(context.node().wantedVespaVersion().orElse(Version.emptyVersion)); var appId = context.node().owner().orElse(ApplicationId.defaultId()); return tenantServiceIdentityFlag .with(FetchVector.Dimension.VESPA_VERSION, version.toFullString()) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .value(); } /* Get the document version to ask for */ private int documentVersion(NodeAgentContext context) { return useNewIdentityDocumentLayout .with(FetchVector.Dimension.HOSTNAME, context.hostname().value()) .value() ? SignedIdentityDocument.DEFAULT_DOCUMENT_VERSION : SignedIdentityDocument.LEGACY_DEFAULT_DOCUMENT_VERSION; } enum IdentityType { NODE("vespa-node-identity-document.json"), TENANT("vespa-tenant-identity-document.json"); private String identityDocument; IdentityType(String identityDocument) { this.identityDocument = identityDocument; } public String getIdentityDocument() { return identityDocument; } } }
Nice! This drains both protocols on shutdown: HTTP/1.x connections are marked non-persistent so they close after the in-flight response, and HTTP/2 sessions are sent a graceful GOAWAY.
private static void gracefulShutdown(Connection connection) { if (connection instanceof HttpConnection http1) { http1.getGenerator().setPersistent(false); } else if (connection instanceof HTTP2ServerConnection http2) { ((HTTP2Session)http2.getSession()).goAway(GoAwayFrame.GRACEFUL, Callback.NOOP); } }
private static void gracefulShutdown(Connection connection) { if (connection instanceof HttpConnection http1) { http1.getGenerator().setPersistent(false); } else if (connection instanceof HTTP2ServerConnection http2) { ((HTTP2Session)http2.getSession()).goAway(GoAwayFrame.GRACEFUL, Callback.NOOP); } }
/**
 * Dispatches a single Jetty servlet request into the jDisc request handler chain,
 * wiring together async servlet execution, request-content reading, response writing,
 * metrics reporting, and graceful connection shutdown when configured thresholds are hit.
 */
class HttpRequestDispatch {

    private static final Logger log = Logger.getLogger(HttpRequestDispatch.class.getName());

    // Parameter annotation used to extract the charset from a form-POST content type.
    private final static String CHARSET_ANNOTATION = ";charset=";

    private final JDiscContext jDiscContext;
    private final Request jettyRequest;
    private final ServletResponseController servletResponseController;
    private final RequestHandler requestHandler;
    private final RequestMetricReporter metricReporter;

    HttpRequestDispatch(JDiscContext jDiscContext,
                        AccessLogEntry accessLogEntry,
                        Context metricContext,
                        HttpServletRequest servletRequest,
                        HttpServletResponse servletResponse) throws IOException {
        this.jDiscContext = jDiscContext;
        requestHandler = newRequestHandler(jDiscContext, accessLogEntry, servletRequest);
        this.jettyRequest = (Request) servletRequest;
        this.metricReporter = new RequestMetricReporter(jDiscContext.metric, metricContext, jettyRequest.getTimeStamp());
        this.servletResponseController = new ServletResponseController(servletRequest,
                                                                       servletResponse,
                                                                       jDiscContext.janitor,
                                                                       metricReporter,
                                                                       jDiscContext.developerMode());
        // May mark this connection for graceful close based on request count / connection age.
        shutdownConnectionGracefullyIfThresholdReached(jettyRequest);
        metricReporter.uriLength(jettyRequest.getOriginalURI().length());
    }

    /**
     * Starts async processing of the request. Completion of both the request reader and the
     * response controller (or failure of either) drives completion of the servlet AsyncContext.
     */
    void dispatchRequest() {
        CompletableFuture<Void> requestCompletion = startServletAsyncExecution();
        ServletRequestReader servletRequestReader;
        try {
            servletRequestReader = handleRequest();
        } catch (Throwable t) {
            // Handler invocation failed: fail the async execution once the response side finishes,
            // and try to surface an error response to the client.
            servletResponseController.finishedFuture()
                    .whenComplete((__, ___) -> requestCompletion.completeExceptionally(t));
            servletResponseController.trySendErrorResponse(t);
            return;
        }
        // Propagate failures in either direction: reader failure -> error response,
        // response failure -> fail the reader.
        servletRequestReader.finishedFuture().whenComplete((__, t) -> {
            if (t != null) servletResponseController.trySendErrorResponse(t);
        });
        servletResponseController.finishedFuture().whenComplete((__, t) -> {
            if (t != null) servletRequestReader.fail(t);
        });
        // Async execution completes only when both request and response sides are done.
        CompletableFuture.allOf(servletRequestReader.finishedFuture(), servletResponseController.finishedFuture())
                .whenComplete((r, t) -> {
                    if (t != null)
                        requestCompletion.completeExceptionally(t);
                    else
                        requestCompletion.complete(null);
                });
        servletRequestReader.start();
    }

    /** Puts the servlet request into async mode and bridges its lifecycle events to a future. */
    private CompletableFuture<Void> startServletAsyncExecution() {
        CompletableFuture<Void> requestCompletion = new CompletableFuture<>();
        AsyncContext asyncCtx = jettyRequest.startAsync();
        asyncCtx.setTimeout(0); // no container-imposed async timeout; timeouts handled elsewhere
        asyncCtx.addListener(new AsyncListener() {
            @Override public void onStartAsync(AsyncEvent event) {}
            @Override public void onComplete(AsyncEvent event) { requestCompletion.complete(null); }
            @Override public void onTimeout(AsyncEvent event) {
                requestCompletion.completeExceptionally(new TimeoutException("Timeout from AsyncContext"));
            }
            @Override public void onError(AsyncEvent event) {
                requestCompletion.completeExceptionally(event.getThrowable());
            }
        });
        requestCompletion.whenComplete((__, t) -> onRequestFinished(asyncCtx, t));
        return requestCompletion;
    }

    /** Final bookkeeping: classify/log any error, report metrics, and complete the AsyncContext. */
    private void onRequestFinished(AsyncContext asyncCtx, Throwable error) {
        boolean reportedError = false;
        if (error != null) {
            servletResponseController.forceClose(error);
            if (isErrorOfType(error, EofException.class, IOException.class)) {
                // Client went away; common and not actionable, so log at FINE.
                log.log(Level.FINE, error,
                        () -> "Network connection was unexpectedly terminated: " + jettyRequest.getRequestURI());
                metricReporter.prematurelyClosed();
            } else if (isErrorOfType(error, TimeoutException.class)) {
                log.log(Level.FINE, error,
                        () -> "Request/stream was timed out by Jetty: " + jettyRequest.getRequestURI());
            } else if (!isErrorOfType(error, OverloadException.class, BindingNotFoundException.class, RequestException.class)) {
                // Unexpected failure types are logged loudly; known application-level ones are not.
                log.log(Level.WARNING, "Request failed: " + jettyRequest.getRequestURI(), error);
            }
            reportedError = true;
            metricReporter.failedResponse();
        } else {
            metricReporter.successfulResponse();
        }
        try {
            asyncCtx.complete();
            log.finest(() -> "Request completed successfully: " + jettyRequest.getRequestURI());
        } catch (Throwable throwable) {
            // If we already reported an error above, a failing complete() is unsurprising -> FINE.
            Level level = reportedError ? Level.FINE : Level.WARNING;
            log.log(level, "Async.complete failed", throwable);
        }
    }

    /**
     * Marks the connection for graceful shutdown when the configured max request count
     * or max connection lifetime has been reached.
     */
    private static void shutdownConnectionGracefullyIfThresholdReached(Request request) {
        ConnectorConfig connectorConfig = getConnector(request).connectorConfig();
        int maxRequestsPerConnection = connectorConfig.maxRequestsPerConnection();
        Connection connection = RequestUtils.getConnection(request);
        if (maxRequestsPerConnection > 0) {
            if (connection.getMessagesIn() >= maxRequestsPerConnection) {
                gracefulShutdown(connection);
            }
        }
        double maxConnectionLifeInSeconds = connectorConfig.maxConnectionLife();
        if (maxConnectionLifeInSeconds > 0) {
            long createdAt = connection.getCreatedTimeStamp();
            Instant expiredAt = Instant.ofEpochMilli((long) (createdAt + maxConnectionLifeInSeconds * 1000));
            boolean isExpired = Instant.now().isAfter(expiredAt);
            if (isExpired) {
                gracefulShutdown(connection);
            }
        }
    }

    /** True if the throwable, or the cause of a CompletionException, is one of the given types. */
    @SafeVarargs
    @SuppressWarnings("varargs")
    private static boolean isErrorOfType(Throwable throwable, Class<? extends Throwable>... handledTypes) {
        return Arrays.stream(handledTypes)
                .anyMatch(exceptionType -> exceptionType.isInstance(throwable)
                        || throwable instanceof CompletionException && exceptionType.isInstance(throwable.getCause()));
    }

    /** Builds the jDisc request and invokes the handler chain, returning the content reader. */
    @SuppressWarnings("try")
    private ServletRequestReader handleRequest() throws IOException {
        HttpRequest jdiscRequest = HttpRequestFactory.newJDiscRequest(jDiscContext.container, jettyRequest);
        ContentChannel requestContentChannel;
        try (ResourceReference ref = References.fromResource(jdiscRequest)) {
            HttpRequestFactory.copyHeaders(jettyRequest, jdiscRequest);
            requestContentChannel = requestHandler.handleRequest(jdiscRequest, servletResponseController.responseHandler());
        }
        return new ServletRequestReader(jettyRequest, requestContentChannel, jDiscContext.janitor, metricReporter);
    }

    /** Composes the handler chain: filtering, optional form-POST wrapping, and access logging. */
    private static RequestHandler newRequestHandler(JDiscContext context,
                                                    AccessLogEntry accessLogEntry,
                                                    HttpServletRequest servletRequest) {
        RequestHandler requestHandler = wrapHandlerIfFormPost(
                new FilteringRequestHandler(context.filterResolver, (Request)servletRequest),
                servletRequest,
                context.serverConfig.removeRawPostBodyForWwwUrlEncodedPost());
        return new AccessLoggingRequestHandler(requestHandler, accessLogEntry);
    }

    /** Wraps the handler for form POSTs (application/x-www-form-urlencoded); otherwise returns it as-is. */
    private static RequestHandler wrapHandlerIfFormPost(RequestHandler requestHandler,
                                                        HttpServletRequest servletRequest,
                                                        boolean removeBodyForFormPost) {
        if (!servletRequest.getMethod().equals("POST")) {
            return requestHandler;
        }
        String contentType = servletRequest.getHeader(HttpHeaders.Names.CONTENT_TYPE);
        if (contentType == null) {
            return requestHandler;
        }
        if (!contentType.startsWith(APPLICATION_X_WWW_FORM_URLENCODED)) {
            return requestHandler;
        }
        return new FormPostRequestHandler(requestHandler, getCharsetName(contentType), removeBodyForFormPost);
    }

    /** Extracts the charset from a form content-type header, defaulting to UTF-8 when absent. */
    private static String getCharsetName(String contentType) {
        if (!contentType.startsWith(CHARSET_ANNOTATION, APPLICATION_X_WWW_FORM_URLENCODED.length())) {
            return StandardCharsets.UTF_8.name();
        }
        return contentType.substring(APPLICATION_X_WWW_FORM_URLENCODED.length() + CHARSET_ANNOTATION.length());
    }
}
/** Dispatches one Jetty servlet request into the jDisc handler chain, managing the async lifecycle. */
class HttpRequestDispatch {

    private static final Logger log = Logger.getLogger(HttpRequestDispatch.class.getName());

    private final static String CHARSET_ANNOTATION = ";charset=";

    private final JDiscContext jDiscContext;
    private final Request jettyRequest;
    private final ServletResponseController servletResponseController;
    private final RequestHandler requestHandler;
    private final RequestMetricReporter metricReporter;

    HttpRequestDispatch(JDiscContext jDiscContext,
                        AccessLogEntry accessLogEntry,
                        Context metricContext,
                        HttpServletRequest servletRequest,
                        HttpServletResponse servletResponse) throws IOException {
        this.jDiscContext = jDiscContext;
        requestHandler = newRequestHandler(jDiscContext, accessLogEntry, servletRequest);
        this.jettyRequest = (Request) servletRequest;
        this.metricReporter = new RequestMetricReporter(jDiscContext.metric, metricContext, jettyRequest.getTimeStamp());
        this.servletResponseController = new ServletResponseController(servletRequest,
                                                                       servletResponse,
                                                                       jDiscContext.janitor,
                                                                       metricReporter,
                                                                       jDiscContext.developerMode());
        shutdownConnectionGracefullyIfThresholdReached(jettyRequest);
        metricReporter.uriLength(jettyRequest.getOriginalURI().length());
    }

    /** Starts async processing; completion of reader and response sides completes the AsyncContext. */
    void dispatchRequest() {
        CompletableFuture<Void> requestCompletion = startServletAsyncExecution();
        ServletRequestReader servletRequestReader;
        try {
            servletRequestReader = handleRequest();
        } catch (Throwable t) {
            servletResponseController.finishedFuture()
                    .whenComplete((__, ___) -> requestCompletion.completeExceptionally(t));
            servletResponseController.trySendErrorResponse(t);
            return;
        }
        // Cross-wire failures between request reading and response writing.
        servletRequestReader.finishedFuture().whenComplete((__, t) -> {
            if (t != null) servletResponseController.trySendErrorResponse(t);
        });
        servletResponseController.finishedFuture().whenComplete((__, t) -> {
            if (t != null) servletRequestReader.fail(t);
        });
        CompletableFuture.allOf(servletRequestReader.finishedFuture(), servletResponseController.finishedFuture())
                .whenComplete((r, t) -> {
                    if (t != null)
                        requestCompletion.completeExceptionally(t);
                    else
                        requestCompletion.complete(null);
                });
        servletRequestReader.start();
    }

    private CompletableFuture<Void> startServletAsyncExecution() {
        CompletableFuture<Void> requestCompletion = new CompletableFuture<>();
        AsyncContext asyncCtx = jettyRequest.startAsync();
        asyncCtx.setTimeout(0); // disable container async timeout
        asyncCtx.addListener(new AsyncListener() {
            @Override public void onStartAsync(AsyncEvent event) {}
            @Override public void onComplete(AsyncEvent event) { requestCompletion.complete(null); }
            @Override public void onTimeout(AsyncEvent event) {
                requestCompletion.completeExceptionally(new TimeoutException("Timeout from AsyncContext"));
            }
            @Override public void onError(AsyncEvent event) {
                requestCompletion.completeExceptionally(event.getThrowable());
            }
        });
        requestCompletion.whenComplete((__, t) -> onRequestFinished(asyncCtx, t));
        return requestCompletion;
    }

    private void onRequestFinished(AsyncContext asyncCtx, Throwable error) {
        boolean reportedError = false;
        if (error != null) {
            servletResponseController.forceClose(error);
            if (isErrorOfType(error, EofException.class, IOException.class)) {
                // Client disconnect: expected noise, log quietly.
                log.log(Level.FINE, error,
                        () -> "Network connection was unexpectedly terminated: " + jettyRequest.getRequestURI());
                metricReporter.prematurelyClosed();
            } else if (isErrorOfType(error, TimeoutException.class)) {
                log.log(Level.FINE, error,
                        () -> "Request/stream was timed out by Jetty: " + jettyRequest.getRequestURI());
            } else if (!isErrorOfType(error, OverloadException.class, BindingNotFoundException.class, RequestException.class)) {
                log.log(Level.WARNING, "Request failed: " + jettyRequest.getRequestURI(), error);
            }
            reportedError = true;
            metricReporter.failedResponse();
        } else {
            metricReporter.successfulResponse();
        }
        try {
            asyncCtx.complete();
            log.finest(() -> "Request completed successfully: " + jettyRequest.getRequestURI());
        } catch (Throwable throwable) {
            Level level = reportedError ? Level.FINE : Level.WARNING;
            log.log(level, "Async.complete failed", throwable);
        }
    }

    private static void shutdownConnectionGracefullyIfThresholdReached(Request request) {
        ConnectorConfig connectorConfig = getConnector(request).connectorConfig();
        int maxRequestsPerConnection = connectorConfig.maxRequestsPerConnection();
        Connection connection = RequestUtils.getConnection(request);
        // Threshold 1: request count on this connection.
        if (maxRequestsPerConnection > 0) {
            if (connection.getMessagesIn() >= maxRequestsPerConnection) {
                gracefulShutdown(connection);
            }
        }
        // Threshold 2: connection age.
        double maxConnectionLifeInSeconds = connectorConfig.maxConnectionLife();
        if (maxConnectionLifeInSeconds > 0) {
            long createdAt = connection.getCreatedTimeStamp();
            Instant expiredAt = Instant.ofEpochMilli((long) (createdAt + maxConnectionLifeInSeconds * 1000));
            boolean isExpired = Instant.now().isAfter(expiredAt);
            if (isExpired) {
                gracefulShutdown(connection);
            }
        }
    }

    @SafeVarargs
    @SuppressWarnings("varargs")
    private static boolean isErrorOfType(Throwable throwable, Class<? extends Throwable>... handledTypes) {
        return Arrays.stream(handledTypes)
                .anyMatch(exceptionType -> exceptionType.isInstance(throwable)
                        || throwable instanceof CompletionException && exceptionType.isInstance(throwable.getCause()));
    }

    @SuppressWarnings("try")
    private ServletRequestReader handleRequest() throws IOException {
        HttpRequest jdiscRequest = HttpRequestFactory.newJDiscRequest(jDiscContext.container, jettyRequest);
        ContentChannel requestContentChannel;
        try (ResourceReference ref = References.fromResource(jdiscRequest)) {
            HttpRequestFactory.copyHeaders(jettyRequest, jdiscRequest);
            requestContentChannel = requestHandler.handleRequest(jdiscRequest, servletResponseController.responseHandler());
        }
        return new ServletRequestReader(jettyRequest, requestContentChannel, jDiscContext.janitor, metricReporter);
    }

    private static RequestHandler newRequestHandler(JDiscContext context,
                                                    AccessLogEntry accessLogEntry,
                                                    HttpServletRequest servletRequest) {
        RequestHandler requestHandler = wrapHandlerIfFormPost(
                new FilteringRequestHandler(context.filterResolver, (Request)servletRequest),
                servletRequest,
                context.serverConfig.removeRawPostBodyForWwwUrlEncodedPost());
        return new AccessLoggingRequestHandler(requestHandler, accessLogEntry);
    }

    private static RequestHandler wrapHandlerIfFormPost(RequestHandler requestHandler,
                                                        HttpServletRequest servletRequest,
                                                        boolean removeBodyForFormPost) {
        if (!servletRequest.getMethod().equals("POST")) {
            return requestHandler;
        }
        String contentType = servletRequest.getHeader(HttpHeaders.Names.CONTENT_TYPE);
        if (contentType == null) {
            return requestHandler;
        }
        if (!contentType.startsWith(APPLICATION_X_WWW_FORM_URLENCODED)) {
            return requestHandler;
        }
        return new FormPostRequestHandler(requestHandler, getCharsetName(contentType), removeBodyForFormPost);
    }

    private static String getCharsetName(String contentType) {
        if (!contentType.startsWith(CHARSET_ANNOTATION, APPLICATION_X_WWW_FORM_URLENCODED.length())) {
            return StandardCharsets.UTF_8.name();
        }
        return contentType.substring(APPLICATION_X_WWW_FORM_URLENCODED.length() + CHARSET_ANNOTATION.length());
    }
}
Remove the `deconstruct` override's delegation — the lifecycle of the injected `AthenzIdentityProvider` is already managed by the container, so it must not be deconstructed here as well.
/**
 * No-op. The injected {@code AthenzIdentityProvider} is a container-managed component,
 * so its lifecycle (including deconstruction) is handled by the container itself;
 * deconstructing it here as well would tear down a component this class does not own.
 */
public void deconstruct() {}
athenzIdentityProvider.deconstruct();
/** Intentionally empty: the wrapped identity provider's lifecycle is managed by the container. */
public void deconstruct() {}
class ServiceIdentityProviderProvider implements Provider<ServiceIdentityProvider> { private AthenzIdentityProvider athenzIdentityProvider; @Inject public ServiceIdentityProviderProvider(AthenzIdentityProvider athenzIdentityProvider) { this.athenzIdentityProvider = athenzIdentityProvider; } @Override public ServiceIdentityProvider get() { if (athenzIdentityProvider instanceof AthenzIdentityProviderImpl impl) return impl; if (athenzIdentityProvider instanceof LegacyAthenzIdentityProviderImpl legacyImpl) return legacyImpl; return null; } @Override }
class ServiceIdentityProviderProvider implements Provider<ServiceIdentityProvider> { private AthenzIdentityProvider athenzIdentityProvider; @Inject public ServiceIdentityProviderProvider(AthenzIdentityProvider athenzIdentityProvider) { this.athenzIdentityProvider = athenzIdentityProvider; } @Override public ServiceIdentityProvider get() { if (athenzIdentityProvider instanceof AthenzIdentityProviderImpl impl) return impl; if (athenzIdentityProvider instanceof LegacyAthenzIdentityProviderImpl legacyImpl) return legacyImpl; return null; } @Override }
```suggestion .collect(Collectors.joining(", ")); ```
/**
 * Returns a human-readable, comma-separated list of the services that are not yet on the
 * wanted config generation, each entry on the form "host:service on generation N".
 */
private String servicesNotConvergedFormatted(ServiceListResponse response) {
    return response.services().stream()
                   .filter(service -> service.currentGeneration != response.wantedGeneration)
                   .map(service -> service.serviceInfo.getHostName() + ":" + service.serviceInfo.getServiceName()
                           + " on generation " + service.currentGeneration)
                   // ", " (with space) — this string goes into log and error messages read by humans
                   .collect(Collectors.joining(", "));
}
.collect(Collectors.joining(","));
/**
 * Formats the services still on an older config generation as a comma-separated,
 * human-readable list: "host:service on generation N".
 */
private String servicesNotConvergedFormatted(ServiceListResponse response) {
    var laggingServices = response.services().stream()
            .filter(s -> s.currentGeneration != response.wantedGeneration);
    return laggingServices
            .map(s -> s.serviceInfo.getHostName() + ":" + s.serviceInfo.getServiceName()
                    + " on generation " + s.currentGeneration)
            .collect(Collectors.joining(", "));
}
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); try (ActionTimer timer = applicationRepository.timerFor(params.getApplicationId(), "deployment.prepareMillis")) { this.configChangeActions = sessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } catch (Exception e) { log.log(Level.FINE, "Preparing session " + session.getSessionId() + " failed, deleting it"); deleteSession(); throw e; } } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); waitForResourcesOrTimeout(params, session, provisioner); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); try { Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); waitForActivation(applicationId, timeoutBudget, activation); } catch (Exception e) { log.log(Level.FINE, "Activating session " + session.getSessionId() + " failed, deleting it"); deleteSession(); throw e; } restartServicesIfNeeded(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void waitForActivation(ApplicationId applicationId, TimeoutBudget timeoutBudget, Activation activation) { activation.awaitCompletion(timeoutBudget.timeLeft()); Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". 
" + fileReferencesText); } private void deleteSession() { sessionRepository().deleteLocalSession(session.getSessionId()); } private SessionRepository sessionRepository() { return tenant.getSessionRepository(); } private void restartServicesIfNeeded(ApplicationId applicationId) { if (provisioner.isEmpty() || configChangeActions == null) return; RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if (restartActions.isEmpty()) return; waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.hostnames(); provisioner.get().restart(applicationId, HostFilter.from(hostnames)); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = configChangeActions.withRestartActions(new RestartActions()); } private void waitForConfigToConverge(ApplicationId applicationId) { deployLogger.log(Level.INFO, "Wait for all services to use new config generation before restarting"); var convergenceChecker = applicationRepository.configConvergenceChecker(); var app = applicationRepository.getActiveApplication(applicationId); ServiceListResponse response = null; while (timeLeft(applicationId, response)) { response = convergenceChecker.checkConvergenceUnlessDeferringChangesUntilRestart(app); if (response.converged) { deployLogger.log(Level.INFO, "Services converged on new config generation " + response.currentGeneration); return; } else { deployLogger.log(Level.INFO, "Services that did not converge on new config generation " + response.wantedGeneration + ": " + servicesNotConvergedFormatted(response) + ". 
Will retry"); try { Thread.sleep(5_000); } catch (InterruptedException e) { /* ignore */ } } } } private boolean timeLeft(ApplicationId applicationId, ServiceListResponse response) { try { params.get().getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for config convergence for " + applicationId + ", wanted generation " + response.wantedGeneration + ", these services had another generation: " + servicesNotConvergedFormatted(response)); } catch (UncheckedTimeoutException e) { throw new ConfigNotConvergedException(e); } return true; } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. */ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? 
"was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return new Memoized<>(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) .waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); session.getCloudAccount().ifPresent(params::cloudAccount); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = 
session.getAllocatedHosts().getHosts(); ActivationContext context = new ActivationContext(session.getSessionId()); AtomicReference<Exception> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try (ProvisionLock lock = provisioner.get().lock(session.getApplicationId())) { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (ApplicationLockException | TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); try (ActionTimer timer = applicationRepository.timerFor(params.getApplicationId(), "deployment.prepareMillis")) { this.configChangeActions = sessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } catch (Exception e) { log.log(Level.FINE, "Preparing session " + session.getSessionId() + " failed, deleting it"); deleteSession(); throw e; } } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); waitForResourcesOrTimeout(params, session, provisioner); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); try { Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); waitForActivation(applicationId, timeoutBudget, activation); } catch (Exception e) { log.log(Level.FINE, "Activating session " + session.getSessionId() + " failed, deleting it"); deleteSession(); throw e; } restartServicesIfNeeded(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void waitForActivation(ApplicationId applicationId, TimeoutBudget timeoutBudget, Activation activation) { activation.awaitCompletion(timeoutBudget.timeLeft()); Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". 
" + fileReferencesText); } private void deleteSession() { sessionRepository().deleteLocalSession(session.getSessionId()); } private SessionRepository sessionRepository() { return tenant.getSessionRepository(); } private void restartServicesIfNeeded(ApplicationId applicationId) { if (provisioner.isEmpty() || configChangeActions == null) return; RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if (restartActions.isEmpty()) return; waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.hostnames(); provisioner.get().restart(applicationId, HostFilter.from(hostnames)); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = configChangeActions.withRestartActions(new RestartActions()); } private void waitForConfigToConverge(ApplicationId applicationId) { deployLogger.log(Level.INFO, "Wait for all services to use new config generation before restarting"); var convergenceChecker = applicationRepository.configConvergenceChecker(); var app = applicationRepository.getActiveApplication(applicationId); ServiceListResponse response = null; while (timeLeft(applicationId, response)) { response = convergenceChecker.checkConvergenceUnlessDeferringChangesUntilRestart(app); if (response.converged) { deployLogger.log(Level.INFO, "Services converged on new config generation " + response.currentGeneration); return; } else { deployLogger.log(Level.INFO, "Services that did not converge on new config generation " + response.wantedGeneration + ": " + servicesNotConvergedFormatted(response) + ". 
Will retry"); try { Thread.sleep(5_000); } catch (InterruptedException e) { /* ignore */ } } } } private boolean timeLeft(ApplicationId applicationId, ServiceListResponse response) { try { params.get().getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for config convergence for " + applicationId + ", wanted generation " + response.wantedGeneration + ", these services had another generation: " + servicesNotConvergedFormatted(response)); } catch (UncheckedTimeoutException e) { throw new ConfigNotConvergedException(e); } return true; } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. */ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? 
"was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return new Memoized<>(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) .waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); session.getCloudAccount().ifPresent(params::cloudAccount); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = 
session.getAllocatedHosts().getHosts(); ActivationContext context = new ActivationContext(session.getSessionId()); AtomicReference<Exception> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try (ProvisionLock lock = provisioner.get().lock(session.getApplicationId())) { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (ApplicationLockException | TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
```suggestion assertLogContainsMessage(deployLogger, "Services that did not converge on new config generation 2: hostName:serviceName on generation 1, hostName2:serviceName2 on generation 1. Will retry"); ```
/**
 * Verifies that activation waits for config convergence before restarting services, and that the
 * convergence progress is logged to the deploy logger.
 */
public void testConfigConvergenceBeforeRestart() {
    List<Host> hosts = createHosts(9, "6.1.0", "6.2.0");
    List<ServiceInfo> services = createServices();
    List<ModelFactory> modelFactories = List.of(
            new ConfigChangeActionsModelFactory(Version.fromString("6.1.0"),
                                                new VespaRestartAction(ClusterSpec.Id.from("test"), "change", services)),
            new ConfigChangeActionsModelFactory(Version.fromString("6.2.0"),
                                                new VespaRestartAction(ClusterSpec.Id.from("test"), "other change", services)));
    DeployTester tester = createTester(hosts, modelFactories, prodZone, Clock.systemUTC(),
                                       new MockConfigConvergenceChecker(2L, services));
    var result = tester.deployApp("src/test/apps/hosted/", "6.2.0");
    DeployHandlerLogger deployLogger = result.deployLogger();
    assertLogContainsMessage(deployLogger, "Wait for all services to use new config generation before restarting");
    // The non-converged services are joined with ", " (comma + space) — see Collectors.joining(", ") in the producer
    assertLogContainsMessage(deployLogger, "Services that did not converge on new config generation 2: hostName:serviceName on generation 1, hostName2:serviceName2 on generation 1. Will retry");
    assertLogContainsMessage(deployLogger, "Services converged on new config generation 2");
}
assertLogContainsMessage(deployLogger, "Services that did not converge on new config generation 2: hostName:serviceName on generation 1, hostName2:serviceName2 on generation 1. Will retry");
/**
 * Verifies that activation waits for config convergence before restarting services, and that the
 * convergence progress (retry and success) is logged to the deploy logger.
 */
public void testConfigConvergenceBeforeRestart() {
    List<Host> hosts = createHosts(9, "6.1.0", "6.2.0");
    List<ServiceInfo> services = createServices();
    List<ModelFactory> modelFactories = List.of(
            new ConfigChangeActionsModelFactory(Version.fromString("6.1.0"),
                                                new VespaRestartAction(ClusterSpec.Id.from("test"), "change", services)),
            new ConfigChangeActionsModelFactory(Version.fromString("6.2.0"),
                                                new VespaRestartAction(ClusterSpec.Id.from("test"), "other change", services)));
    DeployTester tester = createTester(hosts, modelFactories, prodZone, Clock.systemUTC(),
                                       new MockConfigConvergenceChecker(2L, services));
    var result = tester.deployApp("src/test/apps/hosted/", "6.2.0");
    DeployHandlerLogger deployLogger = result.deployLogger();
    // (leftover System.out.println debug dump of the deploy log removed)
    assertLogContainsMessage(deployLogger, "Wait for all services to use new config generation before restarting");
    assertLogContainsMessage(deployLogger, "Services that did not converge on new config generation 2: hostName:serviceName on generation 1, hostName2:serviceName2 on generation 1. Will retry");
    assertLogContainsMessage(deployLogger, "Services converged on new config generation 2");
}
/**
 * Tests deployment of applications through {@link DeployTester} in a hosted setup: redeployments,
 * model-version selection, config change actions and failure modes.
 */
class HostedDeployTest {

    private final Zone prodZone = new Zone(Environment.prod, RegionName.defaultName());
    private final Zone devZone = new Zone(Environment.dev, RegionName.defaultName());

    @Rule
    public TemporaryFolder temporaryFolder = new TemporaryFolder();

    @Test
    public void testRedeployWithVersion() {
        DeployTester tester = new DeployTester.Builder(temporaryFolder)
                .modelFactory(createHostedModelFactory(Version.fromString("4.5.6"), Clock.systemUTC()))
                .build();
        tester.deployApp("src/test/apps/hosted/", "4.5.6");

        Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(tester.applicationId());
        assertTrue(deployment.isPresent());
        deployment.get().activate();
        assertEquals("4.5.6", ((Deployment) deployment.get()).session().getVespaVersion().toString());
    }

    @Test
    public void testRedeploy() {
        DeployTester tester = new DeployTester.Builder(temporaryFolder)
                .modelFactory(createHostedModelFactory())
                .build();
        ApplicationId appId = tester.applicationId();
        tester.deployApp("src/test/apps/hosted/");
        assertFalse(tester.applicationRepository().getActiveSession(appId).get().getMetaData().isInternalRedeploy());

        Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive();
        assertTrue(deployment.isPresent());
        deployment.get().activate();
        assertTrue(tester.applicationRepository().getActiveSession(appId).get().getMetaData().isInternalRedeploy());
    }

    @Test
    public void testReDeployWithWantedDockerImageRepositoryAndAthenzDomain() {
        DeployTester tester = new DeployTester.Builder(temporaryFolder)
                .modelFactory(createHostedModelFactory(Version.fromString("4.5.6"), Clock.systemUTC()))
                .build();
        String dockerImageRepository = "docker.foo.com:4443/bar/baz";
        tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder()
                .vespaVersion("4.5.6")
                .dockerImageRepository(dockerImageRepository)
                .athenzDomain("myDomain"));

        Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(tester.applicationId());
        assertTrue(deployment.isPresent());
        deployment.get().activate();
        assertEquals("4.5.6", ((Deployment) deployment.get()).session().getVespaVersion().toString());
        assertEquals(DockerImage.fromString(dockerImageRepository),
                     ((Deployment) deployment.get()).session().getDockerImageRepository().get());
        assertEquals("myDomain", ((Deployment) deployment.get()).session().getAthenzDomain().get().value());
    }

    @Test
    public void testRedeployWithTenantSecretStores() {
        List<TenantSecretStore> tenantSecretStores = List.of(new TenantSecretStore("foo", "123", "role"));
        DeployTester tester = new DeployTester.Builder(temporaryFolder)
                .modelFactory(createHostedModelFactory(Version.fromString("4.5.6"), Clock.systemUTC()))
                .build();
        tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder()
                .vespaVersion("4.5.6")
                .tenantSecretStores(tenantSecretStores));

        Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(tester.applicationId());
        assertTrue(deployment.isPresent());
        deployment.get().activate();
        assertEquals(tenantSecretStores, ((Deployment) deployment.get()).session().getTenantSecretStores());
    }

    @Test
    public void testDeployOnUnknownVersion() {
        List<ModelFactory> modelFactories = List.of(createHostedModelFactory(Version.fromString("1.0.0")));
        DeployTester tester = new DeployTester.Builder(temporaryFolder)
                .modelFactories(modelFactories)
                .build();
        tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder());

        // Bootstrap deployments wrap the unknown-version failure in an InternalServerException
        try {
            tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder().vespaVersion("1.0.1").isBootstrap(true));
        } catch (InternalServerException expected) {
            assertTrue(expected.getCause() instanceof UnknownVespaVersionException);
        }

        try {
            tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder().vespaVersion("1.0.1"));
            fail("Requesting an unknown node version should not be allowed");
        } catch (UnknownVespaVersionException expected) {
        }
    }

    @Test
    public void testDeployMultipleVersions() {
        List<ModelFactory> modelFactories = List.of(createHostedModelFactory(Version.fromString("6.1.0")),
                                                    createHostedModelFactory(Version.fromString("6.2.0")),
                                                    createHostedModelFactory(Version.fromString("7.0.0")));
        DeployTester tester = new DeployTester.Builder(temporaryFolder).modelFactories(modelFactories)
                .hostedConfigserverConfig(Zone.defaultZone())
                .build();
        tester.deployApp("src/test/apps/hosted/", "6.2.0");
        assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size());
    }

    /**
     * Test that only the minimal set of models are created (model versions used on hosts, the wanted version
     * and the latest version for the latest major)
     */
    @Test
    public void testCreateOnlyNeededModelVersions() {
        List<Host> hosts = createHosts(9, "6.0.0", "6.1.0", null, "6.1.0");

        CountingModelFactory factory600 = createHostedModelFactory(Version.fromString("6.0.0"));
        CountingModelFactory factory610 = createHostedModelFactory(Version.fromString("6.1.0"));
        CountingModelFactory factory620 = createHostedModelFactory(Version.fromString("6.2.0"));
        CountingModelFactory factory700 = createHostedModelFactory(Version.fromString("7.0.0"));
        CountingModelFactory factory710 = createHostedModelFactory(Version.fromString("7.1.0"));
        CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0"));
        List<ModelFactory> modelFactories = List.of(factory600, factory610, factory620,
                                                    factory700, factory710, factory720);

        DeployTester tester = createTester(hosts, modelFactories, prodZone);
        tester.deployApp("src/test/apps/hosted/", "7.0.0");
        assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size());

        assertTrue(factory600.creationCount() > 0);
        assertTrue(factory610.creationCount() > 0);
        assertFalse(factory620.creationCount() > 0);
        assertTrue(factory700.creationCount() > 0);
        assertFalse(factory710.creationCount() > 0);
        assertTrue("Newest is always included", factory720.creationCount() > 0);
    }

    /**
     * Test that only the minimal set of models are created (the wanted version and the latest version for
     * the latest major, since nodes are without version)
     */
    @Test
    public void testCreateOnlyNeededModelVersionsNewNodes() {
        List<Host> hosts = createHosts(9, (String) null);

        CountingModelFactory factory600 = createHostedModelFactory(Version.fromString("6.0.0"));
        CountingModelFactory factory610 = createHostedModelFactory(Version.fromString("6.1.0"));
        CountingModelFactory factory700 = createHostedModelFactory(Version.fromString("7.0.0"));
        CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0"));
        List<ModelFactory> modelFactories = List.of(factory600, factory610, factory700, factory720);

        DeployTester tester = createTester(hosts, modelFactories, prodZone);
        tester.deployApp("src/test/apps/hosted/", "7.0.0");
        assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size());

        assertTrue(factory700.creationCount() > 0);
        assertTrue("Newest model for latest major version is always included", factory720.creationCount() > 0);
    }

    /**
     * Test that deploying an application in a manually deployed zone creates all needed model versions
     * (not just the latest one, manually deployed apps always have skipOldConfigModels set to true)
     */
    @Test
    public void testCreateNeededModelVersionsForManuallyDeployedApps() {
        List<Host> hosts = createHosts(7, "7.0.0");

        CountingModelFactory factory700 = createHostedModelFactory(Version.fromString("7.0.0"), devZone);
        CountingModelFactory factory710 = createHostedModelFactory(Version.fromString("7.1.0"), devZone);
        CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0"), devZone);
        List<ModelFactory> modelFactories = List.of(factory700, factory710, factory720);

        DeployTester tester = createTester(hosts, modelFactories, devZone);
        tester.deployApp("src/test/apps/hosted/", "7.2.0");
        assertEquals(7, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size());

        assertTrue(factory700.creationCount() > 0);
        assertFalse(factory710.creationCount() > 0);
        assertTrue("Newest model for latest major version is always included", factory720.creationCount() > 0);
    }

    /**
     * Test that deploying an application in a manually deployed zone creates latest model version successfully,
     * even if creating one of the older model fails
     */
    @Test
    public void testCreateModelVersionsForManuallyDeployedAppsWhenCreatingFailsForOneVersion() {
        List<Host> hosts = createHosts(7, "7.0.0");

        ModelFactory factory700 = createFailingModelFactory(Version.fromString("7.0.0"));
        CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0"), devZone);
        List<ModelFactory> modelFactories = List.of(factory700, factory720);

        DeployTester tester = createTester(hosts, modelFactories, devZone, Clock.systemUTC());
        tester.deployApp("src/test/apps/hosted/", "7.2.0");
        assertEquals(7, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size());

        assertTrue("Newest model for latest major version is always included", factory720.creationCount() > 0);
    }

    /**
     * Tests that we create the minimal set of models and that version 7.x is created
     * if creating version 8.x fails (to support upgrades to new major version for applications
     * that are still using features that do not work on version 8.x)
     */
    @Test
    public void testWantedVersionIsRequiredAlsoWhenThereIsAnOlderMajorThatDoesNotFailModelBuilding() {
        int oldMajor = 7;
        int newMajor = 8;
        Version wantedVersion = new Version(newMajor, 1, 2);
        Version oldVersion = new Version(oldMajor, 2, 3);
        List<Host> hosts = createHosts(9, oldVersion.toFullString());

        CountingModelFactory oldFactory = createHostedModelFactory(oldVersion);
        ModelFactory newFactory = createFailingModelFactory(wantedVersion);
        List<ModelFactory> modelFactories = List.of(oldFactory, newFactory);

        DeployTester tester = createTester(hosts, modelFactories, prodZone);

        assertEquals("Invalid application",
                     assertThrows(IllegalArgumentException.class,
                                  () -> tester.deployApp("src/test/apps/hosted/", wantedVersion.toFullString()))
                             .getMessage());

        tester.deployApp("src/test/apps/hosted/", oldVersion.toFullString());
        assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size());
        assertTrue(oldFactory.creationCount() > 0);
    }

    /**
     * Tests that we create the minimal set of models and that version 7.x is created
     * if creating version 8.x fails (to support upgrades to new major version for applications
     * that are still using features that do not work on version 8.x)
     */
    @Test
    public void testCreateLatestMajorOnPreviousMajorIfItFailsOnMajorVersion8() {
        deployWithModelForLatestMajorVersionFailing(8);
    }

    /**
     * Tests that we fail deployment for version 7.x if creating version 7.x fails (i.e. that we do not skip
     * building 7.x and only build version 6.x). Skipping creation of models for a major version is only supported
     * for major version >= 8 (see test above) or when major-version=6 is set in application package.
     */
    @Test(expected = InvalidApplicationException.class)
    public void testFailingToCreateModelVersion7FailsDeployment() {
        deployWithModelForLatestMajorVersionFailing(7);
    }

    /**
     * Tests that we create the minimal set of models, but latest model version is created for
     * previous major if creating latest model version on latest major version fails
     **/
    private void deployWithModelForLatestMajorVersionFailing(int newestMajorVersion) {
        int oldestMajorVersion = newestMajorVersion - 1;
        String oldestVersion = oldestMajorVersion + ".0.0";
        String newestOnOldMajorVersion = oldestMajorVersion + ".1.0";
        String newestOnNewMajorVersion = newestMajorVersion + ".2.0";
        List<Host> hosts = createHosts(9, oldestVersion, newestOnOldMajorVersion);

        CountingModelFactory factory1 = createHostedModelFactory(Version.fromString(oldestVersion));
        CountingModelFactory factory2 = createHostedModelFactory(Version.fromString(newestOnOldMajorVersion));
        ModelFactory factory3 = createFailingModelFactory(Version.fromString(newestOnNewMajorVersion));
        List<ModelFactory> modelFactories = List.of(factory1, factory2, factory3);

        DeployTester tester = createTester(hosts, modelFactories, prodZone);
        tester.deployApp("src/test/apps/hosted/", oldestVersion);
        assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size());

        assertTrue(factory1.creationCount() > 0);
        assertTrue("Latest model for previous major version is included if latest model for latest major version fails to build",
                   factory2.creationCount() > 0);
    }

    /** Tests that we fail deployment if a needed model version fails to be created */
    @Test(expected = InvalidApplicationException.class)
    public void testDeploymentFailsIfNeededModelVersionFails() {
        List<Host> hosts = createHosts(7, "7.0.0");

        List<ModelFactory> modelFactories = List.of(createFailingModelFactory(Version.fromString("7.0.0")),
                                                    createHostedModelFactory(Version.fromString("7.1.0")));

        DeployTester tester = createTester(hosts, modelFactories, prodZone);
        tester.deployApp("src/test/apps/hosted/", "7.1.0");
    }

    /**
     * Test that deploying an application works when there are no allocated hosts in the system
     * (the bootstrap a new zone case, so deploying the routing app since that is the first deployment
     * that will be done)
     */
    @Test
    public void testCreateOnlyNeededModelVersionsWhenNoHostsAllocated() {
        CountingModelFactory factory700 = createHostedModelFactory(Version.fromString("7.0.0"));
        CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0"));
        List<ModelFactory> modelFactories = List.of(factory700, factory720);

        DeployTester tester = createTester(createHosts(1, (String) null), modelFactories, prodZone);
        tester.deployApp("src/test/apps/hosted-routing-app/", "7.2.0");
        assertFalse(factory700.creationCount() > 0);
        assertTrue("Newest is always included", factory720.creationCount() > 0);
    }

    @Test
    public void testAccessControlIsOnlyCheckedWhenNoProdDeploymentExists() {
        List<Host> hosts = createHosts(18, "6.0.0");

        List<ModelFactory> modelFactories = List.of(createHostedModelFactory(Version.fromString("6.0.0")),
                                                    createHostedModelFactory(Version.fromString("6.1.0")),
                                                    createHostedModelFactory(Version.fromString("6.2.0")));

        DeployTester tester = createTester(hosts, modelFactories, prodZone, Clock.systemUTC());
        ApplicationId applicationId = tester.applicationId();
        // Deploy with oldest version
        tester.deployApp("src/test/apps/hosted/", "6.0.0");
        assertEquals(9, tester.getAllocatedHostsOf(applicationId).getHosts().size());

        // Deploy with version that does not exist on hosts, no access control exceptions with appId
        // that belongs to a tenant that does not have access control
        tester.deployApp("src/test/apps/hosted-no-write-access-control", "6.1.0");
        assertEquals(9, tester.getAllocatedHostsOf(applicationId).getHosts().size());
    }

    @Test
    public void testRedeployAfterExpiredValidationOverride() {
        // Old version of model fails, new version fails only with validation override expired
        ManualClock clock = new ManualClock("2016-10-09T00:00:00");
        List<ModelFactory> modelFactories = List.of(createHostedModelFactory(clock),
                                                    createFailingModelFactory(Version.fromString("1.0.0")));
        DeployTester tester = new DeployTester.Builder(temporaryFolder).modelFactories(modelFactories)
                .build();
        tester.deployApp("src/test/apps/validationOverride/");

        // Redeployment from local active works
        {
            Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive();
            assertTrue(deployment.isPresent());
            deployment.get().activate();
        }

        clock.advance(Duration.ofDays(2)); // the validation override has now expired

        // Redeployment from local active still works (internal redeploys skip the expired override)
        {
            Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive();
            assertTrue(deployment.isPresent());
            deployment.get().activate();
        }

        // An external redeployment after expiry fails
        {
            try {
                tester.deployApp("src/test/apps/validationOverride/", "myApp");
                fail("Expected redeployment to fail");
            } catch (Exception expected) {
                // success
            }
        }
    }

    @Test
    public void testThatConfigChangeActionsAreCollectedFromAllModels() {
        List<Host> hosts = createHosts(9, "6.1.0", "6.2.0");
        List<ServiceInfo> services = createServices();
        List<ModelFactory> modelFactories = List.of(
                new ConfigChangeActionsModelFactory(Version.fromString("6.1.0"),
                                                    new VespaRestartAction(ClusterSpec.Id.from("test"), "change", services)),
                new ConfigChangeActionsModelFactory(Version.fromString("6.2.0"),
                                                    new VespaRestartAction(ClusterSpec.Id.from("test"), "other change", services)));

        DeployTester tester = createTester(hosts, modelFactories, prodZone);
        tester.deployApp("src/test/apps/hosted/", "6.2.0");
        assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size());
    }

    /**
     * Asserts that exactly one entry in the deploy log contains the given message.
     * Note: this is a helper, not a test — it must NOT be annotated with @Test (JUnit 4 requires
     * test methods to be public, void and parameterless; the stray annotation made the runner
     * report an invalid test).
     */
    private void assertLogContainsMessage(DeployHandlerLogger log, String message) {
        assertEquals(1, SlimeUtils.entriesStream(log.slime().get().field("log"))
                .map(entry -> entry.field("message").asString())
                .filter(m -> m.contains(message))
                .count());
    }

    @Test
    public void testThatAllowedConfigChangeActionsAreActedUpon() {
        List<Host> hosts = createHosts(9, "6.1.0");
        List<ServiceInfo> services = createServices();
        ManualClock clock = new ManualClock(Instant.EPOCH);
        List<ModelFactory> modelFactories = List.of(
                new ConfigChangeActionsModelFactory(Version.fromString("6.1.0"),
                                                    VespaReindexAction.of(ClusterSpec.Id.from("test"), ValidationId.indexModeChange,
                                                                          "reindex please", services, "music"),
                                                    new VespaRestartAction(ClusterSpec.Id.from("test"), "change", services)));

        DeployTester tester = new DeployTester.Builder(temporaryFolder)
                .modelFactories(modelFactories)
                .clock(clock)
                .zone(prodZone)
                .hostProvisioner(new InMemoryProvisioner(new Hosts(hosts), true, false))
                .configConvergenceChecker(new MockConfigConvergenceChecker(2))
                .hostedConfigserverConfig(prodZone)
                .build();
        PrepareResult prepareResult = tester.deployApp("src/test/apps/hosted/", "6.1.0");

        assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size());
        assertTrue(prepareResult.configChangeActions().getRestartActions().isEmpty()); // restarts are handled by the deployment
        assertEquals(Optional.of(ApplicationReindexing.empty()
                                                      .withPending("cluster", "music", prepareResult.sessionId())),
                     tester.tenant().getApplicationRepo().database().readReindexingStatus(tester.applicationId()));
    }

    @Test
    public void testThatAppWithFilesWithInvalidFileExtensionFails() {
        DeployTester tester = new DeployTester.Builder(temporaryFolder)
                .configserverConfig(new ConfigserverConfig(new ConfigserverConfig.Builder()
                        .hostedVespa(true)
                        .configServerDBDir(uncheck(() -> Files.createTempDirectory("serverdb")).toString())
                        .configDefinitionsDir(uncheck(() -> Files.createTempDirectory("configdefinitions")).toString())
                        .fileReferencesDir(uncheck(() -> Files.createTempDirectory("configdefinitions")).toString())))
                .modelFactory(createHostedModelFactory(Version.fromString("8.7.6"), Clock.systemUTC()))
                .build();
        try {
            tester.deployApp("src/test/apps/hosted-invalid-file-extension/", "8.7.6");
            fail();
        } catch (InvalidApplicationException e) {
            assertEquals("java.lang.IllegalArgumentException: File in application package with unknown extension: schemas/file-with-invalid.extension, please delete or move file to another directory.",
                         e.getMessage());
        }
    }

    @Test
    public void testRedeployWithCloudAccount() {
        CloudAccount cloudAccount = CloudAccount.from("012345678912");
        DeployTester tester = new DeployTester.Builder(temporaryFolder)
                .modelFactory(createHostedModelFactory(Version.fromString("4.5.6"), Clock.systemUTC()))
                .build();
        tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder()
                .vespaVersion("4.5.6")
                .cloudAccount(cloudAccount));
        Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(tester.applicationId());
        assertTrue(deployment.isPresent());
        deployment.get().activate();
        assertEquals(cloudAccount, ((Deployment) deployment.get()).session().getCloudAccount().get());
    }

    /** Create the given number of hosts using the supplied versions--the last version is repeated as needed. */
    private List<Host> createHosts(int count, String... versions) {
        return IntStream.rangeClosed(1, count)
                        .mapToObj(i -> createHost("host" + i, versions[Math.min(i, versions.length) - 1]))
                        .toList();
    }

    private Host createHost(String hostname, String version) {
        return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.ofNullable(version).map(Version::fromString));
    }

    private DeployTester createTester(List<Host> hosts, List<ModelFactory> modelFactories, Zone zone) {
        return createTester(hosts, modelFactories, zone, Clock.systemUTC());
    }

    private DeployTester createTester(List<Host> hosts, List<ModelFactory> modelFactories, Zone zone, Clock clock) {
        return createTester(hosts, modelFactories, zone, clock, new MockConfigConvergenceChecker(2));
    }

    private DeployTester createTester(List<Host> hosts, List<ModelFactory> modelFactories, Zone zone,
                                      Clock clock, ConfigConvergenceChecker configConvergenceChecker) {
        return new DeployTester.Builder(temporaryFolder)
                .modelFactories(modelFactories)
                .clock(clock)
                .zone(zone)
                .hostProvisioner(new InMemoryProvisioner(new Hosts(hosts), true, false))
                .configConvergenceChecker(configConvergenceChecker)
                .hostedConfigserverConfig(zone)
                .build();
    }

    private static List<ServiceInfo> createServices() {
        return List.of(new ServiceInfo("serviceName", "serviceType", null, Map.of("clustername", "cluster"), "configId", "hostName"),
                       new ServiceInfo("serviceName2", "serviceType2", null, Map.of("clustername", "cluster"), "configId", "hostName2"));
    }

    /** A model factory that replaces the config change actions of the created model with a fixed list. */
    private static class ConfigChangeActionsModelFactory extends TestModelFactory {

        private final List<ConfigChangeAction> actions;

        ConfigChangeActionsModelFactory(Version vespaVersion, ConfigChangeAction... actions) {
            super(HostedConfigModelRegistry.create(), vespaVersion);
            this.actions = List.of(actions);
        }

        @Override
        public ModelCreateResult createAndValidateModel(ModelContext modelContext, ValidationParameters validationParameters) {
            ModelCreateResult result = super.createAndValidateModel(modelContext, validationParameters);
            return new ModelCreateResult(result.getModel(), actions);
        }
    }

}
class HostedDeployTest { private final Zone prodZone = new Zone(Environment.prod, RegionName.defaultName()); private final Zone devZone = new Zone(Environment.dev, RegionName.defaultName()); @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testRedeployWithVersion() { DeployTester tester = new DeployTester.Builder(temporaryFolder) .modelFactory(createHostedModelFactory(Version.fromString("4.5.6"), Clock.systemUTC())) .build(); tester.deployApp("src/test/apps/hosted/", "4.5.6"); Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(tester.applicationId()); assertTrue(deployment.isPresent()); deployment.get().activate(); assertEquals("4.5.6", ((Deployment) deployment.get()).session().getVespaVersion().toString()); } @Test public void testRedeploy() { DeployTester tester = new DeployTester.Builder(temporaryFolder) .modelFactory(createHostedModelFactory()) .build(); ApplicationId appId = tester.applicationId(); tester.deployApp("src/test/apps/hosted/"); assertFalse(tester.applicationRepository().getActiveSession(appId).get().getMetaData().isInternalRedeploy()); Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(); assertTrue(deployment.isPresent()); deployment.get().activate(); assertTrue(tester.applicationRepository().getActiveSession(appId).get().getMetaData().isInternalRedeploy()); } @Test public void testReDeployWithWantedDockerImageRepositoryAndAthenzDomain() { DeployTester tester = new DeployTester.Builder(temporaryFolder) .modelFactory(createHostedModelFactory(Version.fromString("4.5.6"), Clock.systemUTC())) .build(); String dockerImageRepository = "docker.foo.com:4443/bar/baz"; tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder() .vespaVersion("4.5.6") .dockerImageRepository(dockerImageRepository) .athenzDomain("myDomain")); Optional<com.yahoo.config.provision.Deployment> deployment = 
tester.redeployFromLocalActive(tester.applicationId()); assertTrue(deployment.isPresent()); deployment.get().activate(); assertEquals("4.5.6", ((Deployment) deployment.get()).session().getVespaVersion().toString()); assertEquals(DockerImage.fromString(dockerImageRepository), ((Deployment) deployment.get()).session().getDockerImageRepository().get()); assertEquals("myDomain", ((Deployment) deployment.get()).session().getAthenzDomain().get().value()); } @Test public void testRedeployWithTenantSecretStores() { List<TenantSecretStore> tenantSecretStores = List.of(new TenantSecretStore("foo", "123", "role")); DeployTester tester = new DeployTester.Builder(temporaryFolder) .modelFactory(createHostedModelFactory(Version.fromString("4.5.6"), Clock.systemUTC())) .build(); tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder() .vespaVersion("4.5.6") .tenantSecretStores(tenantSecretStores)); Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(tester.applicationId()); assertTrue(deployment.isPresent()); deployment.get().activate(); assertEquals(tenantSecretStores, ((Deployment) deployment.get()).session().getTenantSecretStores()); } @Test public void testDeployOnUnknownVersion() { List<ModelFactory> modelFactories = List.of(createHostedModelFactory(Version.fromString("1.0.0"))); DeployTester tester = new DeployTester.Builder(temporaryFolder) .modelFactories(modelFactories) .build(); tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder()); try { tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder().vespaVersion("1.0.1").isBootstrap(true)); } catch (InternalServerException expected) { assertTrue(expected.getCause() instanceof UnknownVespaVersionException); } try { tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder().vespaVersion("1.0.1")); fail("Requesting an unknown node version should not be allowed"); } catch (UnknownVespaVersionException expected) { } } @Test public void 
testDeployMultipleVersions() { List<ModelFactory> modelFactories = List.of(createHostedModelFactory(Version.fromString("6.1.0")), createHostedModelFactory(Version.fromString("6.2.0")), createHostedModelFactory(Version.fromString("7.0.0"))); DeployTester tester = new DeployTester.Builder(temporaryFolder).modelFactories(modelFactories) .hostedConfigserverConfig(Zone.defaultZone()) .build(); tester.deployApp("src/test/apps/hosted/", "6.2.0"); assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size()); } /** * Test that only the minimal set of models are created (model versions used on hosts, the wanted version * and the latest version for the latest major) */ @Test public void testCreateOnlyNeededModelVersions() { List<Host> hosts = createHosts(9, "6.0.0", "6.1.0", null, "6.1.0"); CountingModelFactory factory600 = createHostedModelFactory(Version.fromString("6.0.0")); CountingModelFactory factory610 = createHostedModelFactory(Version.fromString("6.1.0")); CountingModelFactory factory620 = createHostedModelFactory(Version.fromString("6.2.0")); CountingModelFactory factory700 = createHostedModelFactory(Version.fromString("7.0.0")); CountingModelFactory factory710 = createHostedModelFactory(Version.fromString("7.1.0")); CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0")); List<ModelFactory> modelFactories = List.of(factory600, factory610, factory620, factory700, factory710, factory720); DeployTester tester = createTester(hosts, modelFactories, prodZone); tester.deployApp("src/test/apps/hosted/", "7.0.0"); assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size()); assertTrue(factory600.creationCount() > 0); assertTrue(factory610.creationCount() > 0); assertFalse(factory620.creationCount() > 0); assertTrue(factory700.creationCount() > 0); assertFalse(factory710.creationCount() > 0); assertTrue("Newest is always included", factory720.creationCount() > 0); } /** * Test that only the 
minimal set of models are created (the wanted version and the latest version for * the latest major, since nodes are without version) */ @Test public void testCreateOnlyNeededModelVersionsNewNodes() { List<Host> hosts = createHosts(9, (String) null); CountingModelFactory factory600 = createHostedModelFactory(Version.fromString("6.0.0")); CountingModelFactory factory610 = createHostedModelFactory(Version.fromString("6.1.0")); CountingModelFactory factory700 = createHostedModelFactory(Version.fromString("7.0.0")); CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0")); List<ModelFactory> modelFactories = List.of(factory600, factory610, factory700, factory720); DeployTester tester = createTester(hosts, modelFactories, prodZone); tester.deployApp("src/test/apps/hosted/", "7.0.0"); assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size()); assertTrue(factory700.creationCount() > 0); assertTrue("Newest model for latest major version is always included", factory720.creationCount() > 0); } /** * Test that deploying an application in a manually deployed zone creates all needed model versions * (not just the latest one, manually deployed apps always have skipOldConfigModels set to true) */ @Test public void testCreateNeededModelVersionsForManuallyDeployedApps() { List<Host> hosts = createHosts(7, "7.0.0"); CountingModelFactory factory700 = createHostedModelFactory(Version.fromString("7.0.0"), devZone); CountingModelFactory factory710 = createHostedModelFactory(Version.fromString("7.1.0"), devZone); CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0"), devZone); List<ModelFactory> modelFactories = List.of(factory700, factory710, factory720); DeployTester tester = createTester(hosts, modelFactories, devZone); tester.deployApp("src/test/apps/hosted/", "7.2.0"); assertEquals(7, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size()); assertTrue(factory700.creationCount() 
> 0); assertFalse(factory710.creationCount() > 0); assertTrue("Newest model for latest major version is always included", factory720.creationCount() > 0); } /** * Test that deploying an application in a manually deployed zone creates latest model version successfully, * even if creating one of the older model fails */ @Test public void testCreateModelVersionsForManuallyDeployedAppsWhenCreatingFailsForOneVersion() { List<Host> hosts = createHosts(7, "7.0.0"); ModelFactory factory700 = createFailingModelFactory(Version.fromString("7.0.0")); CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0"), devZone); List<ModelFactory> modelFactories = List.of(factory700, factory720); DeployTester tester = createTester(hosts, modelFactories, devZone, Clock.systemUTC()); tester.deployApp("src/test/apps/hosted/", "7.2.0"); assertEquals(7, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size()); assertTrue("Newest model for latest major version is always included", factory720.creationCount() > 0); } /** * Tests that we create the minimal set of models and that version 7.x is created * if creating version 8.x fails (to support upgrades to new major version for applications * that are still using features that do not work on version 8.x) */ @Test public void testWantedVersionIsRequiredAlsoWhenThereIsAnOlderMajorThatDoesNotFailModelBuilding() { int oldMajor = 7; int newMajor = 8; Version wantedVersion = new Version(newMajor, 1, 2); Version oldVersion = new Version(oldMajor, 2, 3); List<Host> hosts = createHosts(9, oldVersion.toFullString()); CountingModelFactory oldFactory = createHostedModelFactory(oldVersion); ModelFactory newFactory = createFailingModelFactory(wantedVersion); List<ModelFactory> modelFactories = List.of(oldFactory, newFactory); DeployTester tester = createTester(hosts, modelFactories, prodZone); assertEquals("Invalid application", assertThrows(IllegalArgumentException.class, () -> 
tester.deployApp("src/test/apps/hosted/", wantedVersion.toFullString())) .getMessage()); tester.deployApp("src/test/apps/hosted/", oldVersion.toFullString()); assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size()); assertTrue(oldFactory.creationCount() > 0); } /** * Tests that we create the minimal set of models and that version 7.x is created * if creating version 8.x fails (to support upgrades to new major version for applications * that are still using features that do not work on version 8.x) */ @Test public void testCreateLatestMajorOnPreviousMajorIfItFailsOnMajorVersion8() { deployWithModelForLatestMajorVersionFailing(8); } /** * Tests that we fail deployment for version 7.x if creating version 7.x fails (i.e. that we do not skip * building 7.x and only build version 6.x). Skipping creation of models for a major version is only supported * for major version >= 8 (see test above) or when major-version=6 is set in application package. */ @Test(expected = InvalidApplicationException.class) public void testFailingToCreateModelVersion7FailsDeployment() { deployWithModelForLatestMajorVersionFailing(7); } /** * Tests that we create the minimal set of models, but latest model version is created for * previous major if creating latest model version on latest major version fails **/ private void deployWithModelForLatestMajorVersionFailing(int newestMajorVersion) { int oldestMajorVersion = newestMajorVersion - 1; String oldestVersion = oldestMajorVersion + ".0.0"; String newestOnOldMajorVersion = oldestMajorVersion + ".1.0"; String newestOnNewMajorVersion = newestMajorVersion + ".2.0"; List<Host> hosts = createHosts(9, oldestVersion, newestOnOldMajorVersion); CountingModelFactory factory1 = createHostedModelFactory(Version.fromString(oldestVersion)); CountingModelFactory factory2 = createHostedModelFactory(Version.fromString(newestOnOldMajorVersion)); ModelFactory factory3 = 
createFailingModelFactory(Version.fromString(newestOnNewMajorVersion)); List<ModelFactory> modelFactories = List.of(factory1, factory2, factory3); DeployTester tester = createTester(hosts, modelFactories, prodZone); tester.deployApp("src/test/apps/hosted/", oldestVersion); assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size()); assertTrue(factory1.creationCount() > 0); assertTrue("Latest model for previous major version is included if latest model for latest major version fails to build", factory2.creationCount() > 0); } /** * Tests that we fail deployment if a needed model version fails to be created */ @Test(expected = InvalidApplicationException.class) public void testDeploymentFailsIfNeededModelVersionFails() { List<Host> hosts = createHosts(7, "7.0.0"); List<ModelFactory> modelFactories = List.of(createFailingModelFactory(Version.fromString("7.0.0")), createHostedModelFactory(Version.fromString("7.1.0"))); DeployTester tester = createTester(hosts, modelFactories, prodZone); tester.deployApp("src/test/apps/hosted/", "7.1.0"); } /** * Test that deploying an application works when there are no allocated hosts in the system * (the bootstrap a new zone case, so deploying the routing app since that is the first deployment * that will be done) */ @Test public void testCreateOnlyNeededModelVersionsWhenNoHostsAllocated() { CountingModelFactory factory700 = createHostedModelFactory(Version.fromString("7.0.0")); CountingModelFactory factory720 = createHostedModelFactory(Version.fromString("7.2.0")); List<ModelFactory> modelFactories = List.of(factory700, factory720); DeployTester tester = createTester(createHosts(1, (String) null), modelFactories, prodZone); tester.deployApp("src/test/apps/hosted-routing-app/", "7.2.0"); assertFalse(factory700.creationCount() > 0); assertTrue("Newest is always included", factory720.creationCount() > 0); } @Test public void testAccessControlIsOnlyCheckedWhenNoProdDeploymentExists() { List<Host> hosts = 
createHosts(18, "6.0.0"); List<ModelFactory> modelFactories = List.of(createHostedModelFactory(Version.fromString("6.0.0")), createHostedModelFactory(Version.fromString("6.1.0")), createHostedModelFactory(Version.fromString("6.2.0"))); DeployTester tester = createTester(hosts, modelFactories, prodZone, Clock.systemUTC()); ApplicationId applicationId = tester.applicationId(); tester.deployApp("src/test/apps/hosted/", "6.0.0"); assertEquals(9, tester.getAllocatedHostsOf(applicationId).getHosts().size()); tester.deployApp("src/test/apps/hosted-no-write-access-control", "6.1.0"); assertEquals(9, tester.getAllocatedHostsOf(applicationId).getHosts().size()); } @Test public void testRedeployAfterExpiredValidationOverride() { ManualClock clock = new ManualClock("2016-10-09T00:00:00"); List<ModelFactory> modelFactories = List.of(createHostedModelFactory(clock), createFailingModelFactory(Version.fromString("1.0.0"))); DeployTester tester = new DeployTester.Builder(temporaryFolder).modelFactories(modelFactories) .build(); tester.deployApp("src/test/apps/validationOverride/"); { Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(); assertTrue(deployment.isPresent()); deployment.get().activate(); } clock.advance(Duration.ofDays(2)); { Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(); assertTrue(deployment.isPresent()); deployment.get().activate(); } { try { tester.deployApp("src/test/apps/validationOverride/", "myApp"); fail("Expected redeployment to fail"); } catch (Exception expected) { } } } @Test public void testThatConfigChangeActionsAreCollectedFromAllModels() { List<Host> hosts = createHosts(9, "6.1.0", "6.2.0"); List<ServiceInfo> services = createServices(); List<ModelFactory> modelFactories = List.of( new ConfigChangeActionsModelFactory(Version.fromString("6.1.0"), new VespaRestartAction(ClusterSpec.Id.from("test"), "change", services)), new 
ConfigChangeActionsModelFactory(Version.fromString("6.2.0"), new VespaRestartAction(ClusterSpec.Id.from("test"), "other change", services))); DeployTester tester = createTester(hosts, modelFactories, prodZone); tester.deployApp("src/test/apps/hosted/", "6.2.0"); assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size()); } @Test private void assertLogContainsMessage(DeployHandlerLogger log, String message) { assertEquals(1, SlimeUtils.entriesStream(log.slime().get().field("log")) .map(entry -> entry.field("message").asString()) .filter(m -> m.contains(message)) .count()); } @Test public void testThatAllowedConfigChangeActionsAreActedUpon() { List<Host> hosts = createHosts(9, "6.1.0"); List<ServiceInfo> services = createServices(); ManualClock clock = new ManualClock(Instant.EPOCH); List<ModelFactory> modelFactories = List.of( new ConfigChangeActionsModelFactory(Version.fromString("6.1.0"), VespaReindexAction.of(ClusterSpec.Id.from("test"), ValidationId.indexModeChange, "reindex please", services, "music"), new VespaRestartAction(ClusterSpec.Id.from("test"), "change", services))); DeployTester tester = new DeployTester.Builder(temporaryFolder) .modelFactories(modelFactories) .clock(clock) .zone(prodZone) .hostProvisioner(new InMemoryProvisioner(new Hosts(hosts), true, false)) .configConvergenceChecker(new MockConfigConvergenceChecker(2)) .hostedConfigserverConfig(prodZone) .build(); PrepareResult prepareResult = tester.deployApp("src/test/apps/hosted/", "6.1.0"); assertEquals(9, tester.getAllocatedHostsOf(tester.applicationId()).getHosts().size()); assertTrue(prepareResult.configChangeActions().getRestartActions().isEmpty()); assertEquals(Optional.of(ApplicationReindexing.empty() .withPending("cluster", "music", prepareResult.sessionId())), tester.tenant().getApplicationRepo().database().readReindexingStatus(tester.applicationId())); } @Test public void testThatAppWithFilesWithInvalidFileExtensionFails() { DeployTester tester = new 
DeployTester.Builder(temporaryFolder) .configserverConfig(new ConfigserverConfig(new ConfigserverConfig.Builder() .hostedVespa(true) .configServerDBDir(uncheck(() -> Files.createTempDirectory("serverdb")).toString()) .configDefinitionsDir(uncheck(() -> Files.createTempDirectory("configdefinitions")).toString()) .fileReferencesDir(uncheck(() -> Files.createTempDirectory("configdefinitions")).toString()))) .modelFactory(createHostedModelFactory(Version.fromString("8.7.6"), Clock.systemUTC())) .build(); try { tester.deployApp("src/test/apps/hosted-invalid-file-extension/", "8.7.6"); fail(); } catch (InvalidApplicationException e) { assertEquals("java.lang.IllegalArgumentException: File in application package with unknown extension: schemas/file-with-invalid.extension, please delete or move file to another directory.", e.getMessage()); } } @Test public void testRedeployWithCloudAccount() { CloudAccount cloudAccount = CloudAccount.from("012345678912"); DeployTester tester = new DeployTester.Builder(temporaryFolder) .modelFactory(createHostedModelFactory(Version.fromString("4.5.6"), Clock.systemUTC())) .build(); tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder() .vespaVersion("4.5.6") .cloudAccount(cloudAccount)); Optional<com.yahoo.config.provision.Deployment> deployment = tester.redeployFromLocalActive(tester.applicationId()); assertTrue(deployment.isPresent()); deployment.get().activate(); assertEquals(cloudAccount, ((Deployment) deployment.get()).session().getCloudAccount().get()); } /** Create the given number of hosts using the supplied versions--the last version is repeated as needed. */ private List<Host> createHosts(int count, String ... 
versions) { return IntStream.rangeClosed(1, count) .mapToObj(i -> createHost("host" + i, versions[Math.min(i, versions.length) - 1])) .toList(); } private Host createHost(String hostname, String version) { return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.ofNullable(version).map(Version::fromString)); } private DeployTester createTester(List<Host> hosts, List<ModelFactory> modelFactories, Zone zone) { return createTester(hosts, modelFactories, zone, Clock.systemUTC()); } private DeployTester createTester(List<Host> hosts, List<ModelFactory> modelFactories, Zone zone, Clock clock) { return createTester(hosts, modelFactories, zone, clock, new MockConfigConvergenceChecker(2)); } private DeployTester createTester(List<Host> hosts, List<ModelFactory> modelFactories, Zone zone, Clock clock, ConfigConvergenceChecker configConvergenceChecker) { return new DeployTester.Builder(temporaryFolder) .modelFactories(modelFactories) .clock(clock) .zone(zone) .hostProvisioner(new InMemoryProvisioner(new Hosts(hosts), true, false)) .configConvergenceChecker(configConvergenceChecker) .hostedConfigserverConfig(zone) .build(); } private static List<ServiceInfo> createServices() { return List.of(new ServiceInfo("serviceName", "serviceType", null, Map.of("clustername", "cluster"), "configId", "hostName"), new ServiceInfo("serviceName2", "serviceType2", null, Map.of("clustername", "cluster"), "configId", "hostName2")); } private static class ConfigChangeActionsModelFactory extends TestModelFactory { private final List<ConfigChangeAction> actions; ConfigChangeActionsModelFactory(Version vespaVersion, ConfigChangeAction... 
actions) { super(HostedConfigModelRegistry.create(), vespaVersion); this.actions = List.of(actions); } @Override public ModelCreateResult createAndValidateModel(ModelContext modelContext, ValidationParameters validationParameters) { ModelCreateResult result = super.createAndValidateModel(modelContext, validationParameters); return new ModelCreateResult(result.getModel(), actions); } } }
Consider adding metric definition to `ContainerMetrics`
private void doReconfigurationLoop() { while (!shutdownReconfiguration) { try { var start = Instant.now(); ContainerBuilder builder = createBuilderWithGuiceBindings(); Runnable cleanupTask = configurer.waitForNextGraphGeneration(builder.guiceModules().activate(), false); initializeAndActivateContainer(builder, cleanupTask); var metric = configurer.getComponent(Metric.class); metric.set("jdisc.application.component_graph.creation_time_millis", Duration.between(start, Instant.now()).toMillis(), null); metric.add("jdisc.application.component_graph.reconfigurations", 1L, null); } catch (UncheckedInterruptedException | SubscriberClosedException | ConfigInterruptedException e) { break; } catch (Exception | LinkageError e) { tryReportFailedComponentGraphConstructionMetric(configurer, e); log.log(Level.SEVERE, "Reconfiguration failed, your application package must be fixed, unless this is a " + "JNI reload issue: " + Exceptions.toMessageString(e), e); } catch (Error e) { com.yahoo.protect.Process.logAndDie("java.lang.Error on reconfiguration: We are probably in " + "a bad state and will terminate", e); } } log.fine("Reconfiguration loop exited"); }
metric.set("jdisc.application.component_graph.creation_time_millis", Duration.between(start, Instant.now()).toMillis(), null);
private void doReconfigurationLoop() { while (!shutdownReconfiguration) { try { var start = Instant.now(); ContainerBuilder builder = createBuilderWithGuiceBindings(); Runnable cleanupTask = configurer.waitForNextGraphGeneration(builder.guiceModules().activate(), false); initializeAndActivateContainer(builder, cleanupTask); var metric = configurer.getComponent(Metric.class); metric.set(JDISC_APPLICATION_COMPONENT_GRAPH_CREATION_TIME_MILLIS.baseName(), Duration.between(start, Instant.now()).toMillis(), null); metric.add(JDISC_APPLICATION_COMPONENT_GRAPH_RECONFIGURATIONS.baseName(), 1L, null); } catch (UncheckedInterruptedException | SubscriberClosedException | ConfigInterruptedException e) { break; } catch (Exception | LinkageError e) { tryReportFailedComponentGraphConstructionMetric(configurer, e); log.log(Level.SEVERE, "Reconfiguration failed, your application package must be fixed, unless this is a " + "JNI reload issue: " + Exceptions.toMessageString(e), e); } catch (Error e) { com.yahoo.protect.Process.logAndDie("java.lang.Error on reconfiguration: We are probably in " + "a bad state and will terminate", e); } } log.fine("Reconfiguration loop exited"); }
class to be loaded, * which runs the static block. */ @SuppressWarnings("UnusedDeclaration") public static void ensureVespaLoggingInitialized() { }
class to be loaded, * which runs the static block. */ @SuppressWarnings("UnusedDeclaration") public static void ensureVespaLoggingInitialized() { }
Why?
private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); Timeout socketTimeout = Timeout.ofMinutes(15); MinimalH2AsyncClient client = HttpAsyncClients.createHttp2Minimal( H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 8), 2)) .setTcpNoDelay(true) .setSoTimeout(socketTimeout) .build(), tlsStrategyBuilder.build()); ConnectionConfig connCfg = ConnectionConfig.custom() .setSocketTimeout(socketTimeout) .setConnectTimeout(Timeout.ofSeconds(10)) .build(); client.setConnectionConfigResolver(__ -> connCfg); return client; }
Timeout socketTimeout = Timeout.ofMinutes(15);
private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); Timeout socketTimeout = Timeout.ofMinutes(15); MinimalH2AsyncClient client = HttpAsyncClients.createHttp2Minimal( H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 8), 2)) .setTcpNoDelay(true) .setSoTimeout(socketTimeout) .build(), tlsStrategyBuilder.build()); ConnectionConfig connCfg = ConnectionConfig.custom() .setSocketTimeout(socketTimeout) .setConnectTimeout(Timeout.ofSeconds(10)) .build(); client.setConnectionConfigResolver(__ -> connCfg); return client; }
class Endpoint { private final CloseableHttpAsyncClient client; private final AtomicInteger inflight = new AtomicInteger(0); private final URI url; private Endpoint(CloseableHttpAsyncClient client, URI url) { this.client = client; this.url = url; this.client.start(); } }
class Endpoint { private final CloseableHttpAsyncClient client; private final AtomicInteger inflight = new AtomicInteger(0); private final URI url; private Endpoint(CloseableHttpAsyncClient client, URI url) { this.client = client; this.url = url; this.client.start(); } }
The request/stream will be failed out on socket timeout
private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); Timeout socketTimeout = Timeout.ofMinutes(15); MinimalH2AsyncClient client = HttpAsyncClients.createHttp2Minimal( H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 8), 2)) .setTcpNoDelay(true) .setSoTimeout(socketTimeout) .build(), tlsStrategyBuilder.build()); ConnectionConfig connCfg = ConnectionConfig.custom() .setSocketTimeout(socketTimeout) .setConnectTimeout(Timeout.ofSeconds(10)) .build(); client.setConnectionConfigResolver(__ -> connCfg); return client; }
Timeout socketTimeout = Timeout.ofMinutes(15);
private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); Timeout socketTimeout = Timeout.ofMinutes(15); MinimalH2AsyncClient client = HttpAsyncClients.createHttp2Minimal( H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 8), 2)) .setTcpNoDelay(true) .setSoTimeout(socketTimeout) .build(), tlsStrategyBuilder.build()); ConnectionConfig connCfg = ConnectionConfig.custom() .setSocketTimeout(socketTimeout) .setConnectTimeout(Timeout.ofSeconds(10)) .build(); client.setConnectionConfigResolver(__ -> connCfg); return client; }
class Endpoint { private final CloseableHttpAsyncClient client; private final AtomicInteger inflight = new AtomicInteger(0); private final URI url; private Endpoint(CloseableHttpAsyncClient client, URI url) { this.client = client; this.url = url; this.client.start(); } }
class Endpoint { private final CloseableHttpAsyncClient client; private final AtomicInteger inflight = new AtomicInteger(0); private final URI url; private Endpoint(CloseableHttpAsyncClient client, URI url) { this.client = client; this.url = url; this.client.start(); } }
Once should be enough?
public double maintain() { NodeList nodes = nodeRepository().nodes().list().sortedBy(Comparator.comparing(Node::hostname)); ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot(); updateZoneMetrics(); updateCacheMetrics(); updateMaintenanceMetrics(); nodes.forEach(node -> updateNodeMetrics(node, serviceModel)); updateNodeCountMetrics(nodes); updateLockMetrics(); updateContainerMetrics(nodes); updateTenantUsageMetrics(nodes); updateRepairTicketMetrics(nodes); updateAllocationMetrics(nodes); updateClusterMetrics(nodes); updateClusterMetrics(nodes); return 1.0; }
updateClusterMetrics(nodes);
public double maintain() { NodeList nodes = nodeRepository().nodes().list().sortedBy(Comparator.comparing(Node::hostname)); ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot(); updateZoneMetrics(); updateCacheMetrics(); updateMaintenanceMetrics(); nodes.forEach(node -> updateNodeMetrics(node, serviceModel)); updateNodeCountMetrics(nodes); updateLockMetrics(); updateContainerMetrics(nodes); updateTenantUsageMetrics(nodes); updateRepairTicketMetrics(nodes); updateAllocationMetrics(nodes); updateClusterMetrics(nodes); return 1.0; }
class MetricsReporter extends NodeRepositoryMaintainer { private final Set<Pair<Metric.Context, String>> nonZeroMetrics = new HashSet<>(); private final Metric metric; private final ServiceMonitor serviceMonitor; private final Map<Map<String, String>, Metric.Context> contextMap = new HashMap<>(); private final Supplier<Integer> pendingRedeploymentsSupplier; MetricsReporter(NodeRepository nodeRepository, Metric metric, ServiceMonitor serviceMonitor, Supplier<Integer> pendingRedeploymentsSupplier, Duration interval) { super(nodeRepository, interval, metric); this.metric = metric; this.serviceMonitor = serviceMonitor; this.pendingRedeploymentsSupplier = pendingRedeploymentsSupplier; } @Override private void updateAllocationMetrics(NodeList nodes) { Map<ClusterId, List<Node>> byCluster = nodes.stream() .filter(node -> node.allocation().isPresent()) .filter(node -> !node.allocation().get().owner().instance().isTester()) .collect(Collectors.groupingBy(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterId, allocatedNodes) -> { int activeNodes = 0; int nonActiveNodes = 0; for (var node : allocatedNodes) { if (node.state() == State.active) { activeNodes++; } else { nonActiveNodes++; } } double nonActiveFraction; if (activeNodes == 0) { nonActiveFraction = 1; } else { nonActiveFraction = (double) nonActiveNodes / ((double) activeNodes + (double) nonActiveNodes); } Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster())); metric.set("nodes.active", activeNodes, context); metric.set("nodes.nonActive", nonActiveNodes, context); metric.set("nodes.nonActiveFraction", nonActiveFraction, context); }); } private void updateClusterMetrics(NodeList nodes) { Map<ClusterId, List<Node>> byCluster = nodes.stream() .filter(node -> node.type() == NodeType.tenant) .filter(node -> node.state() == State.active) .filter(node -> node.allocation().isPresent()) 
.collect(Collectors.groupingBy(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterId, clusterNodes) -> { Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster())); updateExclusiveSwitchMetrics(clusterNodes, nodes, context); updateClusterCostMetrics(clusterNodes, context); }); } private void updateExclusiveSwitchMetrics(List<Node> clusterNodes, NodeList allNodes, Metric.Context context) { NodeList clusterHosts = allNodes.parentsOf(NodeList.copyOf(clusterNodes)); long nodesOnExclusiveSwitch = NodeList.copyOf(clusterNodes).onExclusiveSwitch(clusterHosts).size(); double exclusiveSwitchRatio = nodesOnExclusiveSwitch / (double) clusterNodes.size(); metric.set("nodes.exclusiveSwitchFraction", exclusiveSwitchRatio,context); } private void updateClusterCostMetrics(List<Node> clusterNodes, Metric.Context context) { double cost = clusterNodes.stream().mapToDouble(node -> node.resources().cost()).sum(); metric.set("cluster.cost", cost, context); } private void updateZoneMetrics() { metric.set("zone.working", nodeRepository().nodes().isWorking() ? 
1 : 0, null); } private void updateCacheMetrics() { CacheStats nodeCacheStats = nodeRepository().database().nodeSerializerCacheStats(); metric.set("cache.nodeObject.hitRate", nodeCacheStats.hitRate(), null); metric.set("cache.nodeObject.evictionCount", nodeCacheStats.evictionCount(), null); metric.set("cache.nodeObject.size", nodeCacheStats.size(), null); CacheStats curatorCacheStats = nodeRepository().database().cacheStats(); metric.set("cache.curator.hitRate", curatorCacheStats.hitRate(), null); metric.set("cache.curator.evictionCount", curatorCacheStats.evictionCount(), null); metric.set("cache.curator.size", curatorCacheStats.size(), null); } private void updateMaintenanceMetrics() { metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null); } /** * NB: Keep this metric set in sync with internal configserver metric pre-aggregation */ private void updateNodeMetrics(Node node, ServiceModel serviceModel) { Metric.Context context; Optional<Allocation> allocation = node.allocation(); if (allocation.isPresent()) { ApplicationId applicationId = allocation.get().owner(); Map<String, String> dimensions = new HashMap<>(dimensions(applicationId)); dimensions.put("state", node.state().name()); dimensions.put("host", node.hostname()); dimensions.put("clustertype", allocation.get().membership().cluster().type().name()); dimensions.put("clusterid", allocation.get().membership().cluster().id().value()); context = getContext(dimensions); long wantedRestartGeneration = allocation.get().restartGeneration().wanted(); metric.set("wantedRestartGeneration", wantedRestartGeneration, context); long currentRestartGeneration = allocation.get().restartGeneration().current(); metric.set("currentRestartGeneration", currentRestartGeneration, context); boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration; metric.set("wantToRestart", wantToRestart ? 1 : 0, context); metric.set("retired", allocation.get().membership().retired() ? 
1 : 0, context); Version wantedVersion = allocation.get().membership().cluster().vespaVersion(); double wantedVersionNumber = getVersionAsNumber(wantedVersion); metric.set("wantedVespaVersion", wantedVersionNumber, context); Optional<Version> currentVersion = node.status().vespaVersion(); boolean converged = currentVersion.isPresent() && currentVersion.get().equals(wantedVersion); metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context); if (node.cloudAccount().isEnclave(nodeRepository().zone())) { metric.set("hasWireguardKey", node.wireguardPubKey().isPresent() ? 1 : 0, context); } } else { context = getContext(Map.of("state", node.state().name(), "host", node.hostname())); } Optional<Version> currentVersion = node.status().vespaVersion(); if (currentVersion.isPresent()) { double currentVersionNumber = getVersionAsNumber(currentVersion.get()); metric.set("currentVespaVersion", currentVersionNumber, context); } long wantedRebootGeneration = node.status().reboot().wanted(); metric.set("wantedRebootGeneration", wantedRebootGeneration, context); long currentRebootGeneration = node.status().reboot().current(); metric.set("currentRebootGeneration", currentRebootGeneration, context); boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration; metric.set("wantToReboot", wantToReboot ? 1 : 0, context); metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context); metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context); metric.set("failReport", NodeFailer.reasonsToFailHost(node).isEmpty() ? 0 : 1, context); HostName hostname = new HostName(node.hostname()); serviceModel.getApplication(hostname) .map(ApplicationInstance::reference) .map(reference -> nodeRepository().orchestrator().getHostInfo(reference, hostname)) .ifPresent(info -> { int suspended = info.status().isSuspended() ? 
1 : 0; metric.set("suspended", suspended, context); long suspendedSeconds = info.suspendedSince() .map(suspendedSince -> Duration.between(suspendedSince, clock().instant()).getSeconds()) .orElse(0L); metric.set("suspendedSeconds", suspendedSeconds, context); }); long numberOfServices; List<ServiceInstance> services = serviceModel.getServiceInstancesByHostName().get(hostname); if (services == null) { numberOfServices = 0; } else { Map<ServiceStatus, Long> servicesCount = services.stream().collect( Collectors.groupingBy(ServiceInstance::serviceStatus, Collectors.counting())); numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum(); metric.set( "numberOfServicesUp", servicesCount.getOrDefault(ServiceStatus.UP, 0L), context); metric.set( "numberOfServicesNotChecked", servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L), context); long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L); metric.set("numberOfServicesDown", numberOfServicesDown, context); metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context); metric.set("numberOfServicesUnknown", servicesCount.getOrDefault(ServiceStatus.UNKNOWN, 0L), context); boolean down = NodeHealthTracker.allDown(services); metric.set("nodeFailerBadNode", (down ? 1 : 0), context); boolean nodeDownInNodeRepo = node.isDown(); metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context); } metric.set("numberOfServices", numberOfServices, context); } private static String toApp(ApplicationId applicationId) { return applicationId.application().value() + "." + applicationId.instance().value(); } /** * A version 6.163.20 will be returned as a number 163.020. The major * version can normally be inferred. As long as the micro version stays * below 1000 these numbers sort like Version. 
*/ private static double getVersionAsNumber(Version version) { return version.getMinor() + version.getMicro() / 1000.0; } private Metric.Context getContext(Map<String, String> dimensions) { return contextMap.computeIfAbsent(dimensions, metric::createContext); } private void updateNodeCountMetrics(NodeList nodes) { var nodesByState = nodes.nodeType(NodeType.tenant) .asList().stream() .collect(Collectors.groupingBy(Node::state)); var hostsByState = nodes.nodeType(NodeType.host) .asList().stream() .collect(Collectors.groupingBy(Node::state)); for (State state : State.values()) { var nodesInState = nodesByState.getOrDefault(state, List.of()); var hostsInState = hostsByState.getOrDefault(state, List.of()); metric.set("hostedVespa." + state.name() + "Nodes", nodesInState.size(), null); metric.set("hostedVespa." + state.name() + "Hosts", hostsInState.size(), null); } } private void updateLockMetrics() { LockStats.getGlobal().getLockMetricsByPath() .forEach((lockPath, lockMetrics) -> { Metric.Context context = getContext(Map.of("lockPath", lockPath)); LatencyMetrics acquireLatencyMetrics = lockMetrics.getAndResetAcquireLatencyMetrics(); setNonZero("lockAttempt.acquireMaxActiveLatency", acquireLatencyMetrics.maxActiveLatencySeconds(), context); setNonZero("lockAttempt.acquireHz", acquireLatencyMetrics.startHz(), context); setNonZero("lockAttempt.acquireLoad", acquireLatencyMetrics.load(), context); LatencyMetrics lockedLatencyMetrics = lockMetrics.getAndResetLockedLatencyMetrics(); setNonZero("lockAttempt.lockedLatency", lockedLatencyMetrics.maxLatencySeconds(), context); setNonZero("lockAttempt.lockedLoad", lockedLatencyMetrics.load(), context); setNonZero("lockAttempt.acquireTimedOut", lockMetrics.getAndResetAcquireTimedOutCount(), context); setNonZero("lockAttempt.deadlock", lockMetrics.getAndResetDeadlockCount(), context); setNonZero("lockAttempt.errors", lockMetrics.getAndResetAcquireFailedCount() + lockMetrics.getAndResetReleaseFailedCount() + 
lockMetrics.getAndResetNakedReleaseCount() + lockMetrics.getAndResetAcquireWithoutReleaseCount() + lockMetrics.getAndResetForeignReleaseCount(), context); }); } private void setNonZero(String key, Number value, Metric.Context context) { var metricKey = new Pair<>(context, key); if (Double.compare(value.doubleValue(), 0.0) != 0) { metric.set(key, value, context); nonZeroMetrics.add(metricKey); } else if (nonZeroMetrics.remove(metricKey)) { metric.set(key, value, context); } } private void updateContainerMetrics(NodeList nodes) { NodeResources totalCapacity = getCapacityTotal(nodes); metric.set("hostedVespa.docker.totalCapacityCpu", totalCapacity.vcpu(), null); metric.set("hostedVespa.docker.totalCapacityMem", totalCapacity.memoryGb(), null); metric.set("hostedVespa.docker.totalCapacityDisk", totalCapacity.diskGb(), null); NodeResources totalFreeCapacity = getFreeCapacityTotal(nodes); metric.set("hostedVespa.docker.freeCapacityCpu", totalFreeCapacity.vcpu(), null); metric.set("hostedVespa.docker.freeCapacityMem", totalFreeCapacity.memoryGb(), null); metric.set("hostedVespa.docker.freeCapacityDisk", totalFreeCapacity.diskGb(), null); } private void updateTenantUsageMetrics(NodeList nodes) { nodes.nodeType(NodeType.tenant).stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> node.allocation().get().owner())) .forEach( (applicationId, applicationNodes) -> { var allocatedCapacity = applicationNodes.stream() .map(node -> node.allocation().get().requestedResources().justNumbers()) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); var context = getContext(dimensions(applicationId)); metric.set("hostedVespa.docker.allocatedCapacityCpu", allocatedCapacity.vcpu(), context); metric.set("hostedVespa.docker.allocatedCapacityMem", allocatedCapacity.memoryGb(), context); metric.set("hostedVespa.docker.allocatedCapacityDisk", allocatedCapacity.diskGb(), context); } ); } private void 
updateRepairTicketMetrics(NodeList nodes) { nodes.nodeType(NodeType.host).stream() .map(node -> node.reports().getReport("repairTicket")) .flatMap(Optional::stream) .map(report -> report.getInspector().field("status").asString()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())) .forEach((status, number) -> metric.set("hostedVespa.breakfixedHosts", number, getContext(Map.of("status", status)))); } static Map<String, String> dimensions(ApplicationId application, ClusterSpec.Id cluster) { Map<String, String> dimensions = new HashMap<>(dimensions(application)); dimensions.put("clusterid", cluster.value()); return dimensions; } private static Map<String, String> dimensions(ApplicationId application) { return Map.of("tenantName", application.tenant().value(), "applicationId", application.serializedForm().replace(':', '.'), "app", toApp(application)); } private static NodeResources getCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(host -> host.flavor().resources()) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); } private static NodeResources getFreeCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(n -> freeCapacityOf(nodes, n)) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); } private static NodeResources freeCapacityOf(NodeList nodes, Node host) { return nodes.childrenOf(host).asList().stream() .map(node -> node.flavor().resources().justNumbers()) .reduce(host.flavor().resources().justNumbers(), NodeResources::subtract); } }
class MetricsReporter extends NodeRepositoryMaintainer { private final Set<Pair<Metric.Context, String>> nonZeroMetrics = new HashSet<>(); private final Metric metric; private final ServiceMonitor serviceMonitor; private final Map<Map<String, String>, Metric.Context> contextMap = new HashMap<>(); private final Supplier<Integer> pendingRedeploymentsSupplier; MetricsReporter(NodeRepository nodeRepository, Metric metric, ServiceMonitor serviceMonitor, Supplier<Integer> pendingRedeploymentsSupplier, Duration interval) { super(nodeRepository, interval, metric); this.metric = metric; this.serviceMonitor = serviceMonitor; this.pendingRedeploymentsSupplier = pendingRedeploymentsSupplier; } @Override private void updateAllocationMetrics(NodeList nodes) { Map<ClusterId, List<Node>> byCluster = nodes.stream() .filter(node -> node.allocation().isPresent()) .filter(node -> !node.allocation().get().owner().instance().isTester()) .collect(Collectors.groupingBy(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterId, allocatedNodes) -> { int activeNodes = 0; int nonActiveNodes = 0; for (var node : allocatedNodes) { if (node.state() == State.active) { activeNodes++; } else { nonActiveNodes++; } } double nonActiveFraction; if (activeNodes == 0) { nonActiveFraction = 1; } else { nonActiveFraction = (double) nonActiveNodes / ((double) activeNodes + (double) nonActiveNodes); } Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster())); metric.set("nodes.active", activeNodes, context); metric.set("nodes.nonActive", nonActiveNodes, context); metric.set("nodes.nonActiveFraction", nonActiveFraction, context); }); } private void updateClusterMetrics(NodeList nodes) { Map<ClusterId, List<Node>> byCluster = nodes.stream() .filter(node -> node.type() == NodeType.tenant) .filter(node -> node.state() == State.active) .filter(node -> node.allocation().isPresent()) 
.collect(Collectors.groupingBy(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterId, clusterNodes) -> { Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster())); updateExclusiveSwitchMetrics(clusterNodes, nodes, context); updateClusterCostMetrics(clusterNodes, context); }); } private void updateExclusiveSwitchMetrics(List<Node> clusterNodes, NodeList allNodes, Metric.Context context) { NodeList clusterHosts = allNodes.parentsOf(NodeList.copyOf(clusterNodes)); long nodesOnExclusiveSwitch = NodeList.copyOf(clusterNodes).onExclusiveSwitch(clusterHosts).size(); double exclusiveSwitchRatio = nodesOnExclusiveSwitch / (double) clusterNodes.size(); metric.set("nodes.exclusiveSwitchFraction", exclusiveSwitchRatio,context); } private void updateClusterCostMetrics(List<Node> clusterNodes, Metric.Context context) { double cost = clusterNodes.stream().mapToDouble(node -> node.resources().cost()).sum(); metric.set("cluster.cost", cost, context); } private void updateZoneMetrics() { metric.set("zone.working", nodeRepository().nodes().isWorking() ? 
1 : 0, null); } private void updateCacheMetrics() { CacheStats nodeCacheStats = nodeRepository().database().nodeSerializerCacheStats(); metric.set("cache.nodeObject.hitRate", nodeCacheStats.hitRate(), null); metric.set("cache.nodeObject.evictionCount", nodeCacheStats.evictionCount(), null); metric.set("cache.nodeObject.size", nodeCacheStats.size(), null); CacheStats curatorCacheStats = nodeRepository().database().cacheStats(); metric.set("cache.curator.hitRate", curatorCacheStats.hitRate(), null); metric.set("cache.curator.evictionCount", curatorCacheStats.evictionCount(), null); metric.set("cache.curator.size", curatorCacheStats.size(), null); } private void updateMaintenanceMetrics() { metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null); } /** * NB: Keep this metric set in sync with internal configserver metric pre-aggregation */ private void updateNodeMetrics(Node node, ServiceModel serviceModel) { Metric.Context context; Optional<Allocation> allocation = node.allocation(); if (allocation.isPresent()) { ApplicationId applicationId = allocation.get().owner(); Map<String, String> dimensions = new HashMap<>(dimensions(applicationId)); dimensions.put("state", node.state().name()); dimensions.put("host", node.hostname()); dimensions.put("clustertype", allocation.get().membership().cluster().type().name()); dimensions.put("clusterid", allocation.get().membership().cluster().id().value()); context = getContext(dimensions); long wantedRestartGeneration = allocation.get().restartGeneration().wanted(); metric.set("wantedRestartGeneration", wantedRestartGeneration, context); long currentRestartGeneration = allocation.get().restartGeneration().current(); metric.set("currentRestartGeneration", currentRestartGeneration, context); boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration; metric.set("wantToRestart", wantToRestart ? 1 : 0, context); metric.set("retired", allocation.get().membership().retired() ? 
1 : 0, context); Version wantedVersion = allocation.get().membership().cluster().vespaVersion(); double wantedVersionNumber = getVersionAsNumber(wantedVersion); metric.set("wantedVespaVersion", wantedVersionNumber, context); Optional<Version> currentVersion = node.status().vespaVersion(); boolean converged = currentVersion.isPresent() && currentVersion.get().equals(wantedVersion); metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context); if (node.cloudAccount().isEnclave(nodeRepository().zone())) { metric.set("hasWireguardKey", node.wireguardPubKey().isPresent() ? 1 : 0, context); } } else { context = getContext(Map.of("state", node.state().name(), "host", node.hostname())); } Optional<Version> currentVersion = node.status().vespaVersion(); if (currentVersion.isPresent()) { double currentVersionNumber = getVersionAsNumber(currentVersion.get()); metric.set("currentVespaVersion", currentVersionNumber, context); } long wantedRebootGeneration = node.status().reboot().wanted(); metric.set("wantedRebootGeneration", wantedRebootGeneration, context); long currentRebootGeneration = node.status().reboot().current(); metric.set("currentRebootGeneration", currentRebootGeneration, context); boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration; metric.set("wantToReboot", wantToReboot ? 1 : 0, context); metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context); metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context); metric.set("failReport", NodeFailer.reasonsToFailHost(node).isEmpty() ? 0 : 1, context); HostName hostname = new HostName(node.hostname()); serviceModel.getApplication(hostname) .map(ApplicationInstance::reference) .map(reference -> nodeRepository().orchestrator().getHostInfo(reference, hostname)) .ifPresent(info -> { int suspended = info.status().isSuspended() ? 
1 : 0; metric.set("suspended", suspended, context); long suspendedSeconds = info.suspendedSince() .map(suspendedSince -> Duration.between(suspendedSince, clock().instant()).getSeconds()) .orElse(0L); metric.set("suspendedSeconds", suspendedSeconds, context); }); long numberOfServices; List<ServiceInstance> services = serviceModel.getServiceInstancesByHostName().get(hostname); if (services == null) { numberOfServices = 0; } else { Map<ServiceStatus, Long> servicesCount = services.stream().collect( Collectors.groupingBy(ServiceInstance::serviceStatus, Collectors.counting())); numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum(); metric.set( "numberOfServicesUp", servicesCount.getOrDefault(ServiceStatus.UP, 0L), context); metric.set( "numberOfServicesNotChecked", servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L), context); long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L); metric.set("numberOfServicesDown", numberOfServicesDown, context); metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context); metric.set("numberOfServicesUnknown", servicesCount.getOrDefault(ServiceStatus.UNKNOWN, 0L), context); boolean down = NodeHealthTracker.allDown(services); metric.set("nodeFailerBadNode", (down ? 1 : 0), context); boolean nodeDownInNodeRepo = node.isDown(); metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context); } metric.set("numberOfServices", numberOfServices, context); } private static String toApp(ApplicationId applicationId) { return applicationId.application().value() + "." + applicationId.instance().value(); } /** * A version 6.163.20 will be returned as a number 163.020. The major * version can normally be inferred. As long as the micro version stays * below 1000 these numbers sort like Version. 
*/ private static double getVersionAsNumber(Version version) { return version.getMinor() + version.getMicro() / 1000.0; } private Metric.Context getContext(Map<String, String> dimensions) { return contextMap.computeIfAbsent(dimensions, metric::createContext); } private void updateNodeCountMetrics(NodeList nodes) { var nodesByState = nodes.nodeType(NodeType.tenant) .asList().stream() .collect(Collectors.groupingBy(Node::state)); var hostsByState = nodes.nodeType(NodeType.host) .asList().stream() .collect(Collectors.groupingBy(Node::state)); for (State state : State.values()) { var nodesInState = nodesByState.getOrDefault(state, List.of()); var hostsInState = hostsByState.getOrDefault(state, List.of()); metric.set("hostedVespa." + state.name() + "Nodes", nodesInState.size(), null); metric.set("hostedVespa." + state.name() + "Hosts", hostsInState.size(), null); } } private void updateLockMetrics() { LockStats.getGlobal().getLockMetricsByPath() .forEach((lockPath, lockMetrics) -> { Metric.Context context = getContext(Map.of("lockPath", lockPath)); LatencyMetrics acquireLatencyMetrics = lockMetrics.getAndResetAcquireLatencyMetrics(); setNonZero("lockAttempt.acquireMaxActiveLatency", acquireLatencyMetrics.maxActiveLatencySeconds(), context); setNonZero("lockAttempt.acquireHz", acquireLatencyMetrics.startHz(), context); setNonZero("lockAttempt.acquireLoad", acquireLatencyMetrics.load(), context); LatencyMetrics lockedLatencyMetrics = lockMetrics.getAndResetLockedLatencyMetrics(); setNonZero("lockAttempt.lockedLatency", lockedLatencyMetrics.maxLatencySeconds(), context); setNonZero("lockAttempt.lockedLoad", lockedLatencyMetrics.load(), context); setNonZero("lockAttempt.acquireTimedOut", lockMetrics.getAndResetAcquireTimedOutCount(), context); setNonZero("lockAttempt.deadlock", lockMetrics.getAndResetDeadlockCount(), context); setNonZero("lockAttempt.errors", lockMetrics.getAndResetAcquireFailedCount() + lockMetrics.getAndResetReleaseFailedCount() + 
lockMetrics.getAndResetNakedReleaseCount() + lockMetrics.getAndResetAcquireWithoutReleaseCount() + lockMetrics.getAndResetForeignReleaseCount(), context); }); } private void setNonZero(String key, Number value, Metric.Context context) { var metricKey = new Pair<>(context, key); if (Double.compare(value.doubleValue(), 0.0) != 0) { metric.set(key, value, context); nonZeroMetrics.add(metricKey); } else if (nonZeroMetrics.remove(metricKey)) { metric.set(key, value, context); } } private void updateContainerMetrics(NodeList nodes) { NodeResources totalCapacity = getCapacityTotal(nodes); metric.set("hostedVespa.docker.totalCapacityCpu", totalCapacity.vcpu(), null); metric.set("hostedVespa.docker.totalCapacityMem", totalCapacity.memoryGb(), null); metric.set("hostedVespa.docker.totalCapacityDisk", totalCapacity.diskGb(), null); NodeResources totalFreeCapacity = getFreeCapacityTotal(nodes); metric.set("hostedVespa.docker.freeCapacityCpu", totalFreeCapacity.vcpu(), null); metric.set("hostedVespa.docker.freeCapacityMem", totalFreeCapacity.memoryGb(), null); metric.set("hostedVespa.docker.freeCapacityDisk", totalFreeCapacity.diskGb(), null); } private void updateTenantUsageMetrics(NodeList nodes) { nodes.nodeType(NodeType.tenant).stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> node.allocation().get().owner())) .forEach( (applicationId, applicationNodes) -> { var allocatedCapacity = applicationNodes.stream() .map(node -> node.allocation().get().requestedResources().justNumbers()) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); var context = getContext(dimensions(applicationId)); metric.set("hostedVespa.docker.allocatedCapacityCpu", allocatedCapacity.vcpu(), context); metric.set("hostedVespa.docker.allocatedCapacityMem", allocatedCapacity.memoryGb(), context); metric.set("hostedVespa.docker.allocatedCapacityDisk", allocatedCapacity.diskGb(), context); } ); } private void 
updateRepairTicketMetrics(NodeList nodes) { nodes.nodeType(NodeType.host).stream() .map(node -> node.reports().getReport("repairTicket")) .flatMap(Optional::stream) .map(report -> report.getInspector().field("status").asString()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())) .forEach((status, number) -> metric.set("hostedVespa.breakfixedHosts", number, getContext(Map.of("status", status)))); } static Map<String, String> dimensions(ApplicationId application, ClusterSpec.Id cluster) { Map<String, String> dimensions = new HashMap<>(dimensions(application)); dimensions.put("clusterid", cluster.value()); return dimensions; } private static Map<String, String> dimensions(ApplicationId application) { return Map.of("tenantName", application.tenant().value(), "applicationId", application.serializedForm().replace(':', '.'), "app", toApp(application)); } private static NodeResources getCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(host -> host.flavor().resources()) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); } private static NodeResources getFreeCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(n -> freeCapacityOf(nodes, n)) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); } private static NodeResources freeCapacityOf(NodeList nodes, Node host) { return nodes.childrenOf(host).asList().stream() .map(node -> node.flavor().resources().justNumbers()) .reduce(host.flavor().resources().justNumbers(), NodeResources::subtract); } }
fixed
public double maintain() { NodeList nodes = nodeRepository().nodes().list().sortedBy(Comparator.comparing(Node::hostname)); ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot(); updateZoneMetrics(); updateCacheMetrics(); updateMaintenanceMetrics(); nodes.forEach(node -> updateNodeMetrics(node, serviceModel)); updateNodeCountMetrics(nodes); updateLockMetrics(); updateContainerMetrics(nodes); updateTenantUsageMetrics(nodes); updateRepairTicketMetrics(nodes); updateAllocationMetrics(nodes); updateClusterMetrics(nodes); updateClusterMetrics(nodes); return 1.0; }
updateClusterMetrics(nodes);
public double maintain() { NodeList nodes = nodeRepository().nodes().list().sortedBy(Comparator.comparing(Node::hostname)); ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot(); updateZoneMetrics(); updateCacheMetrics(); updateMaintenanceMetrics(); nodes.forEach(node -> updateNodeMetrics(node, serviceModel)); updateNodeCountMetrics(nodes); updateLockMetrics(); updateContainerMetrics(nodes); updateTenantUsageMetrics(nodes); updateRepairTicketMetrics(nodes); updateAllocationMetrics(nodes); updateClusterMetrics(nodes); return 1.0; }
class MetricsReporter extends NodeRepositoryMaintainer { private final Set<Pair<Metric.Context, String>> nonZeroMetrics = new HashSet<>(); private final Metric metric; private final ServiceMonitor serviceMonitor; private final Map<Map<String, String>, Metric.Context> contextMap = new HashMap<>(); private final Supplier<Integer> pendingRedeploymentsSupplier; MetricsReporter(NodeRepository nodeRepository, Metric metric, ServiceMonitor serviceMonitor, Supplier<Integer> pendingRedeploymentsSupplier, Duration interval) { super(nodeRepository, interval, metric); this.metric = metric; this.serviceMonitor = serviceMonitor; this.pendingRedeploymentsSupplier = pendingRedeploymentsSupplier; } @Override private void updateAllocationMetrics(NodeList nodes) { Map<ClusterId, List<Node>> byCluster = nodes.stream() .filter(node -> node.allocation().isPresent()) .filter(node -> !node.allocation().get().owner().instance().isTester()) .collect(Collectors.groupingBy(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterId, allocatedNodes) -> { int activeNodes = 0; int nonActiveNodes = 0; for (var node : allocatedNodes) { if (node.state() == State.active) { activeNodes++; } else { nonActiveNodes++; } } double nonActiveFraction; if (activeNodes == 0) { nonActiveFraction = 1; } else { nonActiveFraction = (double) nonActiveNodes / ((double) activeNodes + (double) nonActiveNodes); } Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster())); metric.set("nodes.active", activeNodes, context); metric.set("nodes.nonActive", nonActiveNodes, context); metric.set("nodes.nonActiveFraction", nonActiveFraction, context); }); } private void updateClusterMetrics(NodeList nodes) { Map<ClusterId, List<Node>> byCluster = nodes.stream() .filter(node -> node.type() == NodeType.tenant) .filter(node -> node.state() == State.active) .filter(node -> node.allocation().isPresent()) 
.collect(Collectors.groupingBy(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterId, clusterNodes) -> { Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster())); updateExclusiveSwitchMetrics(clusterNodes, nodes, context); updateClusterCostMetrics(clusterNodes, context); }); } private void updateExclusiveSwitchMetrics(List<Node> clusterNodes, NodeList allNodes, Metric.Context context) { NodeList clusterHosts = allNodes.parentsOf(NodeList.copyOf(clusterNodes)); long nodesOnExclusiveSwitch = NodeList.copyOf(clusterNodes).onExclusiveSwitch(clusterHosts).size(); double exclusiveSwitchRatio = nodesOnExclusiveSwitch / (double) clusterNodes.size(); metric.set("nodes.exclusiveSwitchFraction", exclusiveSwitchRatio,context); } private void updateClusterCostMetrics(List<Node> clusterNodes, Metric.Context context) { double cost = clusterNodes.stream().mapToDouble(node -> node.resources().cost()).sum(); metric.set("cluster.cost", cost, context); } private void updateZoneMetrics() { metric.set("zone.working", nodeRepository().nodes().isWorking() ? 
1 : 0, null); } private void updateCacheMetrics() { CacheStats nodeCacheStats = nodeRepository().database().nodeSerializerCacheStats(); metric.set("cache.nodeObject.hitRate", nodeCacheStats.hitRate(), null); metric.set("cache.nodeObject.evictionCount", nodeCacheStats.evictionCount(), null); metric.set("cache.nodeObject.size", nodeCacheStats.size(), null); CacheStats curatorCacheStats = nodeRepository().database().cacheStats(); metric.set("cache.curator.hitRate", curatorCacheStats.hitRate(), null); metric.set("cache.curator.evictionCount", curatorCacheStats.evictionCount(), null); metric.set("cache.curator.size", curatorCacheStats.size(), null); } private void updateMaintenanceMetrics() { metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null); } /** * NB: Keep this metric set in sync with internal configserver metric pre-aggregation */ private void updateNodeMetrics(Node node, ServiceModel serviceModel) { Metric.Context context; Optional<Allocation> allocation = node.allocation(); if (allocation.isPresent()) { ApplicationId applicationId = allocation.get().owner(); Map<String, String> dimensions = new HashMap<>(dimensions(applicationId)); dimensions.put("state", node.state().name()); dimensions.put("host", node.hostname()); dimensions.put("clustertype", allocation.get().membership().cluster().type().name()); dimensions.put("clusterid", allocation.get().membership().cluster().id().value()); context = getContext(dimensions); long wantedRestartGeneration = allocation.get().restartGeneration().wanted(); metric.set("wantedRestartGeneration", wantedRestartGeneration, context); long currentRestartGeneration = allocation.get().restartGeneration().current(); metric.set("currentRestartGeneration", currentRestartGeneration, context); boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration; metric.set("wantToRestart", wantToRestart ? 1 : 0, context); metric.set("retired", allocation.get().membership().retired() ? 
1 : 0, context); Version wantedVersion = allocation.get().membership().cluster().vespaVersion(); double wantedVersionNumber = getVersionAsNumber(wantedVersion); metric.set("wantedVespaVersion", wantedVersionNumber, context); Optional<Version> currentVersion = node.status().vespaVersion(); boolean converged = currentVersion.isPresent() && currentVersion.get().equals(wantedVersion); metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context); if (node.cloudAccount().isEnclave(nodeRepository().zone())) { metric.set("hasWireguardKey", node.wireguardPubKey().isPresent() ? 1 : 0, context); } } else { context = getContext(Map.of("state", node.state().name(), "host", node.hostname())); } Optional<Version> currentVersion = node.status().vespaVersion(); if (currentVersion.isPresent()) { double currentVersionNumber = getVersionAsNumber(currentVersion.get()); metric.set("currentVespaVersion", currentVersionNumber, context); } long wantedRebootGeneration = node.status().reboot().wanted(); metric.set("wantedRebootGeneration", wantedRebootGeneration, context); long currentRebootGeneration = node.status().reboot().current(); metric.set("currentRebootGeneration", currentRebootGeneration, context); boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration; metric.set("wantToReboot", wantToReboot ? 1 : 0, context); metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context); metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context); metric.set("failReport", NodeFailer.reasonsToFailHost(node).isEmpty() ? 0 : 1, context); HostName hostname = new HostName(node.hostname()); serviceModel.getApplication(hostname) .map(ApplicationInstance::reference) .map(reference -> nodeRepository().orchestrator().getHostInfo(reference, hostname)) .ifPresent(info -> { int suspended = info.status().isSuspended() ? 
1 : 0; metric.set("suspended", suspended, context); long suspendedSeconds = info.suspendedSince() .map(suspendedSince -> Duration.between(suspendedSince, clock().instant()).getSeconds()) .orElse(0L); metric.set("suspendedSeconds", suspendedSeconds, context); }); long numberOfServices; List<ServiceInstance> services = serviceModel.getServiceInstancesByHostName().get(hostname); if (services == null) { numberOfServices = 0; } else { Map<ServiceStatus, Long> servicesCount = services.stream().collect( Collectors.groupingBy(ServiceInstance::serviceStatus, Collectors.counting())); numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum(); metric.set( "numberOfServicesUp", servicesCount.getOrDefault(ServiceStatus.UP, 0L), context); metric.set( "numberOfServicesNotChecked", servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L), context); long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L); metric.set("numberOfServicesDown", numberOfServicesDown, context); metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context); metric.set("numberOfServicesUnknown", servicesCount.getOrDefault(ServiceStatus.UNKNOWN, 0L), context); boolean down = NodeHealthTracker.allDown(services); metric.set("nodeFailerBadNode", (down ? 1 : 0), context); boolean nodeDownInNodeRepo = node.isDown(); metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context); } metric.set("numberOfServices", numberOfServices, context); } private static String toApp(ApplicationId applicationId) { return applicationId.application().value() + "." + applicationId.instance().value(); } /** * A version 6.163.20 will be returned as a number 163.020. The major * version can normally be inferred. As long as the micro version stays * below 1000 these numbers sort like Version. 
*/ private static double getVersionAsNumber(Version version) { return version.getMinor() + version.getMicro() / 1000.0; } private Metric.Context getContext(Map<String, String> dimensions) { return contextMap.computeIfAbsent(dimensions, metric::createContext); } private void updateNodeCountMetrics(NodeList nodes) { var nodesByState = nodes.nodeType(NodeType.tenant) .asList().stream() .collect(Collectors.groupingBy(Node::state)); var hostsByState = nodes.nodeType(NodeType.host) .asList().stream() .collect(Collectors.groupingBy(Node::state)); for (State state : State.values()) { var nodesInState = nodesByState.getOrDefault(state, List.of()); var hostsInState = hostsByState.getOrDefault(state, List.of()); metric.set("hostedVespa." + state.name() + "Nodes", nodesInState.size(), null); metric.set("hostedVespa." + state.name() + "Hosts", hostsInState.size(), null); } } private void updateLockMetrics() { LockStats.getGlobal().getLockMetricsByPath() .forEach((lockPath, lockMetrics) -> { Metric.Context context = getContext(Map.of("lockPath", lockPath)); LatencyMetrics acquireLatencyMetrics = lockMetrics.getAndResetAcquireLatencyMetrics(); setNonZero("lockAttempt.acquireMaxActiveLatency", acquireLatencyMetrics.maxActiveLatencySeconds(), context); setNonZero("lockAttempt.acquireHz", acquireLatencyMetrics.startHz(), context); setNonZero("lockAttempt.acquireLoad", acquireLatencyMetrics.load(), context); LatencyMetrics lockedLatencyMetrics = lockMetrics.getAndResetLockedLatencyMetrics(); setNonZero("lockAttempt.lockedLatency", lockedLatencyMetrics.maxLatencySeconds(), context); setNonZero("lockAttempt.lockedLoad", lockedLatencyMetrics.load(), context); setNonZero("lockAttempt.acquireTimedOut", lockMetrics.getAndResetAcquireTimedOutCount(), context); setNonZero("lockAttempt.deadlock", lockMetrics.getAndResetDeadlockCount(), context); setNonZero("lockAttempt.errors", lockMetrics.getAndResetAcquireFailedCount() + lockMetrics.getAndResetReleaseFailedCount() + 
lockMetrics.getAndResetNakedReleaseCount() + lockMetrics.getAndResetAcquireWithoutReleaseCount() + lockMetrics.getAndResetForeignReleaseCount(), context); }); } private void setNonZero(String key, Number value, Metric.Context context) { var metricKey = new Pair<>(context, key); if (Double.compare(value.doubleValue(), 0.0) != 0) { metric.set(key, value, context); nonZeroMetrics.add(metricKey); } else if (nonZeroMetrics.remove(metricKey)) { metric.set(key, value, context); } } private void updateContainerMetrics(NodeList nodes) { NodeResources totalCapacity = getCapacityTotal(nodes); metric.set("hostedVespa.docker.totalCapacityCpu", totalCapacity.vcpu(), null); metric.set("hostedVespa.docker.totalCapacityMem", totalCapacity.memoryGb(), null); metric.set("hostedVespa.docker.totalCapacityDisk", totalCapacity.diskGb(), null); NodeResources totalFreeCapacity = getFreeCapacityTotal(nodes); metric.set("hostedVespa.docker.freeCapacityCpu", totalFreeCapacity.vcpu(), null); metric.set("hostedVespa.docker.freeCapacityMem", totalFreeCapacity.memoryGb(), null); metric.set("hostedVespa.docker.freeCapacityDisk", totalFreeCapacity.diskGb(), null); } private void updateTenantUsageMetrics(NodeList nodes) { nodes.nodeType(NodeType.tenant).stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> node.allocation().get().owner())) .forEach( (applicationId, applicationNodes) -> { var allocatedCapacity = applicationNodes.stream() .map(node -> node.allocation().get().requestedResources().justNumbers()) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); var context = getContext(dimensions(applicationId)); metric.set("hostedVespa.docker.allocatedCapacityCpu", allocatedCapacity.vcpu(), context); metric.set("hostedVespa.docker.allocatedCapacityMem", allocatedCapacity.memoryGb(), context); metric.set("hostedVespa.docker.allocatedCapacityDisk", allocatedCapacity.diskGb(), context); } ); } private void 
updateRepairTicketMetrics(NodeList nodes) { nodes.nodeType(NodeType.host).stream() .map(node -> node.reports().getReport("repairTicket")) .flatMap(Optional::stream) .map(report -> report.getInspector().field("status").asString()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())) .forEach((status, number) -> metric.set("hostedVespa.breakfixedHosts", number, getContext(Map.of("status", status)))); } static Map<String, String> dimensions(ApplicationId application, ClusterSpec.Id cluster) { Map<String, String> dimensions = new HashMap<>(dimensions(application)); dimensions.put("clusterid", cluster.value()); return dimensions; } private static Map<String, String> dimensions(ApplicationId application) { return Map.of("tenantName", application.tenant().value(), "applicationId", application.serializedForm().replace(':', '.'), "app", toApp(application)); } private static NodeResources getCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(host -> host.flavor().resources()) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); } private static NodeResources getFreeCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(n -> freeCapacityOf(nodes, n)) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); } private static NodeResources freeCapacityOf(NodeList nodes, Node host) { return nodes.childrenOf(host).asList().stream() .map(node -> node.flavor().resources().justNumbers()) .reduce(host.flavor().resources().justNumbers(), NodeResources::subtract); } }
class MetricsReporter extends NodeRepositoryMaintainer { private final Set<Pair<Metric.Context, String>> nonZeroMetrics = new HashSet<>(); private final Metric metric; private final ServiceMonitor serviceMonitor; private final Map<Map<String, String>, Metric.Context> contextMap = new HashMap<>(); private final Supplier<Integer> pendingRedeploymentsSupplier; MetricsReporter(NodeRepository nodeRepository, Metric metric, ServiceMonitor serviceMonitor, Supplier<Integer> pendingRedeploymentsSupplier, Duration interval) { super(nodeRepository, interval, metric); this.metric = metric; this.serviceMonitor = serviceMonitor; this.pendingRedeploymentsSupplier = pendingRedeploymentsSupplier; } @Override private void updateAllocationMetrics(NodeList nodes) { Map<ClusterId, List<Node>> byCluster = nodes.stream() .filter(node -> node.allocation().isPresent()) .filter(node -> !node.allocation().get().owner().instance().isTester()) .collect(Collectors.groupingBy(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterId, allocatedNodes) -> { int activeNodes = 0; int nonActiveNodes = 0; for (var node : allocatedNodes) { if (node.state() == State.active) { activeNodes++; } else { nonActiveNodes++; } } double nonActiveFraction; if (activeNodes == 0) { nonActiveFraction = 1; } else { nonActiveFraction = (double) nonActiveNodes / ((double) activeNodes + (double) nonActiveNodes); } Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster())); metric.set("nodes.active", activeNodes, context); metric.set("nodes.nonActive", nonActiveNodes, context); metric.set("nodes.nonActiveFraction", nonActiveFraction, context); }); } private void updateClusterMetrics(NodeList nodes) { Map<ClusterId, List<Node>> byCluster = nodes.stream() .filter(node -> node.type() == NodeType.tenant) .filter(node -> node.state() == State.active) .filter(node -> node.allocation().isPresent()) 
.collect(Collectors.groupingBy(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id()))); byCluster.forEach((clusterId, clusterNodes) -> { Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster())); updateExclusiveSwitchMetrics(clusterNodes, nodes, context); updateClusterCostMetrics(clusterNodes, context); }); } private void updateExclusiveSwitchMetrics(List<Node> clusterNodes, NodeList allNodes, Metric.Context context) { NodeList clusterHosts = allNodes.parentsOf(NodeList.copyOf(clusterNodes)); long nodesOnExclusiveSwitch = NodeList.copyOf(clusterNodes).onExclusiveSwitch(clusterHosts).size(); double exclusiveSwitchRatio = nodesOnExclusiveSwitch / (double) clusterNodes.size(); metric.set("nodes.exclusiveSwitchFraction", exclusiveSwitchRatio,context); } private void updateClusterCostMetrics(List<Node> clusterNodes, Metric.Context context) { double cost = clusterNodes.stream().mapToDouble(node -> node.resources().cost()).sum(); metric.set("cluster.cost", cost, context); } private void updateZoneMetrics() { metric.set("zone.working", nodeRepository().nodes().isWorking() ? 
1 : 0, null); } private void updateCacheMetrics() { CacheStats nodeCacheStats = nodeRepository().database().nodeSerializerCacheStats(); metric.set("cache.nodeObject.hitRate", nodeCacheStats.hitRate(), null); metric.set("cache.nodeObject.evictionCount", nodeCacheStats.evictionCount(), null); metric.set("cache.nodeObject.size", nodeCacheStats.size(), null); CacheStats curatorCacheStats = nodeRepository().database().cacheStats(); metric.set("cache.curator.hitRate", curatorCacheStats.hitRate(), null); metric.set("cache.curator.evictionCount", curatorCacheStats.evictionCount(), null); metric.set("cache.curator.size", curatorCacheStats.size(), null); } private void updateMaintenanceMetrics() { metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null); } /** * NB: Keep this metric set in sync with internal configserver metric pre-aggregation */ private void updateNodeMetrics(Node node, ServiceModel serviceModel) { Metric.Context context; Optional<Allocation> allocation = node.allocation(); if (allocation.isPresent()) { ApplicationId applicationId = allocation.get().owner(); Map<String, String> dimensions = new HashMap<>(dimensions(applicationId)); dimensions.put("state", node.state().name()); dimensions.put("host", node.hostname()); dimensions.put("clustertype", allocation.get().membership().cluster().type().name()); dimensions.put("clusterid", allocation.get().membership().cluster().id().value()); context = getContext(dimensions); long wantedRestartGeneration = allocation.get().restartGeneration().wanted(); metric.set("wantedRestartGeneration", wantedRestartGeneration, context); long currentRestartGeneration = allocation.get().restartGeneration().current(); metric.set("currentRestartGeneration", currentRestartGeneration, context); boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration; metric.set("wantToRestart", wantToRestart ? 1 : 0, context); metric.set("retired", allocation.get().membership().retired() ? 
1 : 0, context); Version wantedVersion = allocation.get().membership().cluster().vespaVersion(); double wantedVersionNumber = getVersionAsNumber(wantedVersion); metric.set("wantedVespaVersion", wantedVersionNumber, context); Optional<Version> currentVersion = node.status().vespaVersion(); boolean converged = currentVersion.isPresent() && currentVersion.get().equals(wantedVersion); metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context); if (node.cloudAccount().isEnclave(nodeRepository().zone())) { metric.set("hasWireguardKey", node.wireguardPubKey().isPresent() ? 1 : 0, context); } } else { context = getContext(Map.of("state", node.state().name(), "host", node.hostname())); } Optional<Version> currentVersion = node.status().vespaVersion(); if (currentVersion.isPresent()) { double currentVersionNumber = getVersionAsNumber(currentVersion.get()); metric.set("currentVespaVersion", currentVersionNumber, context); } long wantedRebootGeneration = node.status().reboot().wanted(); metric.set("wantedRebootGeneration", wantedRebootGeneration, context); long currentRebootGeneration = node.status().reboot().current(); metric.set("currentRebootGeneration", currentRebootGeneration, context); boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration; metric.set("wantToReboot", wantToReboot ? 1 : 0, context); metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context); metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context); metric.set("failReport", NodeFailer.reasonsToFailHost(node).isEmpty() ? 0 : 1, context); HostName hostname = new HostName(node.hostname()); serviceModel.getApplication(hostname) .map(ApplicationInstance::reference) .map(reference -> nodeRepository().orchestrator().getHostInfo(reference, hostname)) .ifPresent(info -> { int suspended = info.status().isSuspended() ? 
1 : 0; metric.set("suspended", suspended, context); long suspendedSeconds = info.suspendedSince() .map(suspendedSince -> Duration.between(suspendedSince, clock().instant()).getSeconds()) .orElse(0L); metric.set("suspendedSeconds", suspendedSeconds, context); }); long numberOfServices; List<ServiceInstance> services = serviceModel.getServiceInstancesByHostName().get(hostname); if (services == null) { numberOfServices = 0; } else { Map<ServiceStatus, Long> servicesCount = services.stream().collect( Collectors.groupingBy(ServiceInstance::serviceStatus, Collectors.counting())); numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum(); metric.set( "numberOfServicesUp", servicesCount.getOrDefault(ServiceStatus.UP, 0L), context); metric.set( "numberOfServicesNotChecked", servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L), context); long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L); metric.set("numberOfServicesDown", numberOfServicesDown, context); metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context); metric.set("numberOfServicesUnknown", servicesCount.getOrDefault(ServiceStatus.UNKNOWN, 0L), context); boolean down = NodeHealthTracker.allDown(services); metric.set("nodeFailerBadNode", (down ? 1 : 0), context); boolean nodeDownInNodeRepo = node.isDown(); metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context); } metric.set("numberOfServices", numberOfServices, context); } private static String toApp(ApplicationId applicationId) { return applicationId.application().value() + "." + applicationId.instance().value(); } /** * A version 6.163.20 will be returned as a number 163.020. The major * version can normally be inferred. As long as the micro version stays * below 1000 these numbers sort like Version. 
*/ private static double getVersionAsNumber(Version version) { return version.getMinor() + version.getMicro() / 1000.0; } private Metric.Context getContext(Map<String, String> dimensions) { return contextMap.computeIfAbsent(dimensions, metric::createContext); } private void updateNodeCountMetrics(NodeList nodes) { var nodesByState = nodes.nodeType(NodeType.tenant) .asList().stream() .collect(Collectors.groupingBy(Node::state)); var hostsByState = nodes.nodeType(NodeType.host) .asList().stream() .collect(Collectors.groupingBy(Node::state)); for (State state : State.values()) { var nodesInState = nodesByState.getOrDefault(state, List.of()); var hostsInState = hostsByState.getOrDefault(state, List.of()); metric.set("hostedVespa." + state.name() + "Nodes", nodesInState.size(), null); metric.set("hostedVespa." + state.name() + "Hosts", hostsInState.size(), null); } } private void updateLockMetrics() { LockStats.getGlobal().getLockMetricsByPath() .forEach((lockPath, lockMetrics) -> { Metric.Context context = getContext(Map.of("lockPath", lockPath)); LatencyMetrics acquireLatencyMetrics = lockMetrics.getAndResetAcquireLatencyMetrics(); setNonZero("lockAttempt.acquireMaxActiveLatency", acquireLatencyMetrics.maxActiveLatencySeconds(), context); setNonZero("lockAttempt.acquireHz", acquireLatencyMetrics.startHz(), context); setNonZero("lockAttempt.acquireLoad", acquireLatencyMetrics.load(), context); LatencyMetrics lockedLatencyMetrics = lockMetrics.getAndResetLockedLatencyMetrics(); setNonZero("lockAttempt.lockedLatency", lockedLatencyMetrics.maxLatencySeconds(), context); setNonZero("lockAttempt.lockedLoad", lockedLatencyMetrics.load(), context); setNonZero("lockAttempt.acquireTimedOut", lockMetrics.getAndResetAcquireTimedOutCount(), context); setNonZero("lockAttempt.deadlock", lockMetrics.getAndResetDeadlockCount(), context); setNonZero("lockAttempt.errors", lockMetrics.getAndResetAcquireFailedCount() + lockMetrics.getAndResetReleaseFailedCount() + 
lockMetrics.getAndResetNakedReleaseCount() + lockMetrics.getAndResetAcquireWithoutReleaseCount() + lockMetrics.getAndResetForeignReleaseCount(), context); }); } private void setNonZero(String key, Number value, Metric.Context context) { var metricKey = new Pair<>(context, key); if (Double.compare(value.doubleValue(), 0.0) != 0) { metric.set(key, value, context); nonZeroMetrics.add(metricKey); } else if (nonZeroMetrics.remove(metricKey)) { metric.set(key, value, context); } } private void updateContainerMetrics(NodeList nodes) { NodeResources totalCapacity = getCapacityTotal(nodes); metric.set("hostedVespa.docker.totalCapacityCpu", totalCapacity.vcpu(), null); metric.set("hostedVespa.docker.totalCapacityMem", totalCapacity.memoryGb(), null); metric.set("hostedVespa.docker.totalCapacityDisk", totalCapacity.diskGb(), null); NodeResources totalFreeCapacity = getFreeCapacityTotal(nodes); metric.set("hostedVespa.docker.freeCapacityCpu", totalFreeCapacity.vcpu(), null); metric.set("hostedVespa.docker.freeCapacityMem", totalFreeCapacity.memoryGb(), null); metric.set("hostedVespa.docker.freeCapacityDisk", totalFreeCapacity.diskGb(), null); } private void updateTenantUsageMetrics(NodeList nodes) { nodes.nodeType(NodeType.tenant).stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> node.allocation().get().owner())) .forEach( (applicationId, applicationNodes) -> { var allocatedCapacity = applicationNodes.stream() .map(node -> node.allocation().get().requestedResources().justNumbers()) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); var context = getContext(dimensions(applicationId)); metric.set("hostedVespa.docker.allocatedCapacityCpu", allocatedCapacity.vcpu(), context); metric.set("hostedVespa.docker.allocatedCapacityMem", allocatedCapacity.memoryGb(), context); metric.set("hostedVespa.docker.allocatedCapacityDisk", allocatedCapacity.diskGb(), context); } ); } private void 
updateRepairTicketMetrics(NodeList nodes) { nodes.nodeType(NodeType.host).stream() .map(node -> node.reports().getReport("repairTicket")) .flatMap(Optional::stream) .map(report -> report.getInspector().field("status").asString()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())) .forEach((status, number) -> metric.set("hostedVespa.breakfixedHosts", number, getContext(Map.of("status", status)))); } static Map<String, String> dimensions(ApplicationId application, ClusterSpec.Id cluster) { Map<String, String> dimensions = new HashMap<>(dimensions(application)); dimensions.put("clusterid", cluster.value()); return dimensions; } private static Map<String, String> dimensions(ApplicationId application) { return Map.of("tenantName", application.tenant().value(), "applicationId", application.serializedForm().replace(':', '.'), "app", toApp(application)); } private static NodeResources getCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(host -> host.flavor().resources()) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); } private static NodeResources getFreeCapacityTotal(NodeList nodes) { return nodes.hosts().state(State.active).asList().stream() .map(n -> freeCapacityOf(nodes, n)) .map(NodeResources::justNumbers) .reduce(new NodeResources(0, 0, 0, 0, any).justNumbers(), NodeResources::add); } private static NodeResources freeCapacityOf(NodeList nodes, Node host) { return nodes.childrenOf(host).asList().stream() .map(node -> node.flavor().resources().justNumbers()) .reduce(host.flavor().resources().justNumbers(), NodeResources::subtract); } }
If so, do we even need to set it since it's given by dynamicProvisioning()? In all clouds up to now, the allowEnclave lags dynamicProvisioning by years, so I think it is better with a whitelist of clouds.
public Zone(ConfigserverConfig configserverConfig, CloudConfig cloudConfig) { this(Cloud.builder() .name(CloudName.from(configserverConfig.cloud())) .dynamicProvisioning(cloudConfig.dynamicProvisioning()) .allowHostSharing(cloudConfig.allowHostSharing()) .allowEnclave(cloudConfig.dynamicProvisioning()) .requireAccessControl(cloudConfig.requireAccessControl()) .account(CloudAccount.from(cloudConfig.account())) .build(), SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); }
.allowEnclave(cloudConfig.dynamicProvisioning())
public Zone(ConfigserverConfig configserverConfig, CloudConfig cloudConfig) { this(Cloud.builder() .name(CloudName.from(configserverConfig.cloud())) .dynamicProvisioning(cloudConfig.dynamicProvisioning()) .allowHostSharing(cloudConfig.allowHostSharing()) .allowEnclave(cloudConfig.dynamicProvisioning()) .requireAccessControl(cloudConfig.requireAccessControl()) .account(CloudAccount.from(cloudConfig.account())) .build(), SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); }
class Zone { private final Cloud cloud; private final SystemName systemName; private final Environment environment; private final RegionName region; @Inject /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(Cloud.defaultCloud(), systemName, environment, region); } /** Create from cloud, system, environment and region. Also used for testing. */ public Zone(Cloud cloud, SystemName systemName, Environment environment, RegionName region) { this.cloud = cloud; this.systemName = systemName; this.environment = environment; this.region = region; } public Cloud getCloud() { return cloud(); } /** Returns the current cloud */ public Cloud cloud() { return cloud; } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the current environment */ public Environment environment() { return environment; } /** Returns the current region */ public RegionName region() { return region; } /** Returns the string "environment.region" */ public String systemLocalValue() { return environment + "." + region; } /** Do not use */ public static Zone defaultZone() { return new Zone(Cloud.defaultCloud(), SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); } @Override public String toString() { return "zone " + environment + "." + region; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Zone)) return false; Zone zone = (Zone) o; return environment == zone.environment && Objects.equals(region, zone.region); } @Override public int hashCode() { return Objects.hash(environment, region); } }
class Zone { private final Cloud cloud; private final SystemName systemName; private final Environment environment; private final RegionName region; @Inject /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(Cloud.defaultCloud(), systemName, environment, region); } /** Create from cloud, system, environment and region. Also used for testing. */ public Zone(Cloud cloud, SystemName systemName, Environment environment, RegionName region) { this.cloud = cloud; this.systemName = systemName; this.environment = environment; this.region = region; } public Cloud getCloud() { return cloud(); } /** Returns the current cloud */ public Cloud cloud() { return cloud; } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the current environment */ public Environment environment() { return environment; } /** Returns the current region */ public RegionName region() { return region; } /** Returns the string "environment.region" */ public String systemLocalValue() { return environment + "." + region; } /** Do not use */ public static Zone defaultZone() { return new Zone(Cloud.defaultCloud(), SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); } @Override public String toString() { return "zone " + environment + "." + region; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Zone)) return false; Zone zone = (Zone) o; return environment == zone.environment && Objects.equals(region, zone.region); } @Override public int hashCode() { return Objects.hash(environment, region); } }
I just want to have this check in one place so it's easy to change if needed in the future. I could change this line to `configserverConfig.cloud().equals("aws") || configserverConfig.cloud().equals("gcp")` if you think that's better?
public Zone(ConfigserverConfig configserverConfig, CloudConfig cloudConfig) { this(Cloud.builder() .name(CloudName.from(configserverConfig.cloud())) .dynamicProvisioning(cloudConfig.dynamicProvisioning()) .allowHostSharing(cloudConfig.allowHostSharing()) .allowEnclave(cloudConfig.dynamicProvisioning()) .requireAccessControl(cloudConfig.requireAccessControl()) .account(CloudAccount.from(cloudConfig.account())) .build(), SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); }
.allowEnclave(cloudConfig.dynamicProvisioning())
public Zone(ConfigserverConfig configserverConfig, CloudConfig cloudConfig) { this(Cloud.builder() .name(CloudName.from(configserverConfig.cloud())) .dynamicProvisioning(cloudConfig.dynamicProvisioning()) .allowHostSharing(cloudConfig.allowHostSharing()) .allowEnclave(cloudConfig.dynamicProvisioning()) .requireAccessControl(cloudConfig.requireAccessControl()) .account(CloudAccount.from(cloudConfig.account())) .build(), SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); }
class Zone { private final Cloud cloud; private final SystemName systemName; private final Environment environment; private final RegionName region; @Inject /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(Cloud.defaultCloud(), systemName, environment, region); } /** Create from cloud, system, environment and region. Also used for testing. */ public Zone(Cloud cloud, SystemName systemName, Environment environment, RegionName region) { this.cloud = cloud; this.systemName = systemName; this.environment = environment; this.region = region; } public Cloud getCloud() { return cloud(); } /** Returns the current cloud */ public Cloud cloud() { return cloud; } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the current environment */ public Environment environment() { return environment; } /** Returns the current region */ public RegionName region() { return region; } /** Returns the string "environment.region" */ public String systemLocalValue() { return environment + "." + region; } /** Do not use */ public static Zone defaultZone() { return new Zone(Cloud.defaultCloud(), SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); } @Override public String toString() { return "zone " + environment + "." + region; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Zone)) return false; Zone zone = (Zone) o; return environment == zone.environment && Objects.equals(region, zone.region); } @Override public int hashCode() { return Objects.hash(environment, region); } }
class Zone { private final Cloud cloud; private final SystemName systemName; private final Environment environment; private final RegionName region; @Inject /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(Cloud.defaultCloud(), systemName, environment, region); } /** Create from cloud, system, environment and region. Also used for testing. */ public Zone(Cloud cloud, SystemName systemName, Environment environment, RegionName region) { this.cloud = cloud; this.systemName = systemName; this.environment = environment; this.region = region; } public Cloud getCloud() { return cloud(); } /** Returns the current cloud */ public Cloud cloud() { return cloud; } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the current environment */ public Environment environment() { return environment; } /** Returns the current region */ public RegionName region() { return region; } /** Returns the string "environment.region" */ public String systemLocalValue() { return environment + "." + region; } /** Do not use */ public static Zone defaultZone() { return new Zone(Cloud.defaultCloud(), SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); } @Override public String toString() { return "zone " + environment + "." + region; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Zone)) return false; Zone zone = (Zone) o; return environment == zone.environment && Objects.equals(region, zone.region); } @Override public int hashCode() { return Objects.hash(environment, region); } }
Yes, that would be more intuitive.
public Zone(ConfigserverConfig configserverConfig, CloudConfig cloudConfig) { this(Cloud.builder() .name(CloudName.from(configserverConfig.cloud())) .dynamicProvisioning(cloudConfig.dynamicProvisioning()) .allowHostSharing(cloudConfig.allowHostSharing()) .allowEnclave(cloudConfig.dynamicProvisioning()) .requireAccessControl(cloudConfig.requireAccessControl()) .account(CloudAccount.from(cloudConfig.account())) .build(), SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); }
.allowEnclave(cloudConfig.dynamicProvisioning())
public Zone(ConfigserverConfig configserverConfig, CloudConfig cloudConfig) { this(Cloud.builder() .name(CloudName.from(configserverConfig.cloud())) .dynamicProvisioning(cloudConfig.dynamicProvisioning()) .allowHostSharing(cloudConfig.allowHostSharing()) .allowEnclave(cloudConfig.dynamicProvisioning()) .requireAccessControl(cloudConfig.requireAccessControl()) .account(CloudAccount.from(cloudConfig.account())) .build(), SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); }
class Zone { private final Cloud cloud; private final SystemName systemName; private final Environment environment; private final RegionName region; @Inject /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(Cloud.defaultCloud(), systemName, environment, region); } /** Create from cloud, system, environment and region. Also used for testing. */ public Zone(Cloud cloud, SystemName systemName, Environment environment, RegionName region) { this.cloud = cloud; this.systemName = systemName; this.environment = environment; this.region = region; } public Cloud getCloud() { return cloud(); } /** Returns the current cloud */ public Cloud cloud() { return cloud; } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the current environment */ public Environment environment() { return environment; } /** Returns the current region */ public RegionName region() { return region; } /** Returns the string "environment.region" */ public String systemLocalValue() { return environment + "." + region; } /** Do not use */ public static Zone defaultZone() { return new Zone(Cloud.defaultCloud(), SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); } @Override public String toString() { return "zone " + environment + "." + region; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Zone)) return false; Zone zone = (Zone) o; return environment == zone.environment && Objects.equals(region, zone.region); } @Override public int hashCode() { return Objects.hash(environment, region); } }
class Zone { private final Cloud cloud; private final SystemName systemName; private final Environment environment; private final RegionName region; @Inject /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(Cloud.defaultCloud(), systemName, environment, region); } /** Create from cloud, system, environment and region. Also used for testing. */ public Zone(Cloud cloud, SystemName systemName, Environment environment, RegionName region) { this.cloud = cloud; this.systemName = systemName; this.environment = environment; this.region = region; } public Cloud getCloud() { return cloud(); } /** Returns the current cloud */ public Cloud cloud() { return cloud; } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the current environment */ public Environment environment() { return environment; } /** Returns the current region */ public RegionName region() { return region; } /** Returns the string "environment.region" */ public String systemLocalValue() { return environment + "." + region; } /** Do not use */ public static Zone defaultZone() { return new Zone(Cloud.defaultCloud(), SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); } @Override public String toString() { return "zone " + environment + "." + region; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Zone)) return false; Zone zone = (Zone) o; return environment == zone.environment && Objects.equals(region, zone.region); } @Override public int hashCode() { return Objects.hash(environment, region); } }
In #27108
public Zone(ConfigserverConfig configserverConfig, CloudConfig cloudConfig) { this(Cloud.builder() .name(CloudName.from(configserverConfig.cloud())) .dynamicProvisioning(cloudConfig.dynamicProvisioning()) .allowHostSharing(cloudConfig.allowHostSharing()) .allowEnclave(cloudConfig.dynamicProvisioning()) .requireAccessControl(cloudConfig.requireAccessControl()) .account(CloudAccount.from(cloudConfig.account())) .build(), SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); }
.allowEnclave(cloudConfig.dynamicProvisioning())
public Zone(ConfigserverConfig configserverConfig, CloudConfig cloudConfig) { this(Cloud.builder() .name(CloudName.from(configserverConfig.cloud())) .dynamicProvisioning(cloudConfig.dynamicProvisioning()) .allowHostSharing(cloudConfig.allowHostSharing()) .allowEnclave(cloudConfig.dynamicProvisioning()) .requireAccessControl(cloudConfig.requireAccessControl()) .account(CloudAccount.from(cloudConfig.account())) .build(), SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); }
class Zone { private final Cloud cloud; private final SystemName systemName; private final Environment environment; private final RegionName region; @Inject /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(Cloud.defaultCloud(), systemName, environment, region); } /** Create from cloud, system, environment and region. Also used for testing. */ public Zone(Cloud cloud, SystemName systemName, Environment environment, RegionName region) { this.cloud = cloud; this.systemName = systemName; this.environment = environment; this.region = region; } public Cloud getCloud() { return cloud(); } /** Returns the current cloud */ public Cloud cloud() { return cloud; } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the current environment */ public Environment environment() { return environment; } /** Returns the current region */ public RegionName region() { return region; } /** Returns the string "environment.region" */ public String systemLocalValue() { return environment + "." + region; } /** Do not use */ public static Zone defaultZone() { return new Zone(Cloud.defaultCloud(), SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); } @Override public String toString() { return "zone " + environment + "." + region; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Zone)) return false; Zone zone = (Zone) o; return environment == zone.environment && Objects.equals(region, zone.region); } @Override public int hashCode() { return Objects.hash(environment, region); } }
class Zone { private final Cloud cloud; private final SystemName systemName; private final Environment environment; private final RegionName region; @Inject /** Create from environment and region. Use for testing. */ public Zone(Environment environment, RegionName region) { this(SystemName.defaultSystem(), environment, region); } /** Create from system, environment and region. Use for testing. */ public Zone(SystemName systemName, Environment environment, RegionName region) { this(Cloud.defaultCloud(), systemName, environment, region); } /** Create from cloud, system, environment and region. Also used for testing. */ public Zone(Cloud cloud, SystemName systemName, Environment environment, RegionName region) { this.cloud = cloud; this.systemName = systemName; this.environment = environment; this.region = region; } public Cloud getCloud() { return cloud(); } /** Returns the current cloud */ public Cloud cloud() { return cloud; } /** Returns the current system */ public SystemName system() { return systemName; } /** Returns the current environment */ public Environment environment() { return environment; } /** Returns the current region */ public RegionName region() { return region; } /** Returns the string "environment.region" */ public String systemLocalValue() { return environment + "." + region; } /** Do not use */ public static Zone defaultZone() { return new Zone(Cloud.defaultCloud(), SystemName.defaultSystem(), Environment.defaultEnvironment(), RegionName.defaultName()); } @Override public String toString() { return "zone " + environment + "." + region; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Zone)) return false; Zone zone = (Zone) o; return environment == zone.environment && Objects.equals(region, zone.region); } @Override public int hashCode() { return Objects.hash(environment, region); } }
The node-repo metrics are not reported via metrics-proxy, so there is no need to add them here.
private static Set<Metric> getConfigServerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ConfigServerMetrics.REQUESTS.count()); addMetric(metrics, ConfigServerMetrics.FAILED_REQUESTS.count()); addMetric(metrics, ConfigServerMetrics.LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, ConfigServerMetrics.CACHE_CONFIG_ELEMS.last()); addMetric(metrics, ConfigServerMetrics.CACHE_CHECKSUM_ELEMS.last()); addMetric(metrics, ConfigServerMetrics.HOSTS.last()); addMetric(metrics, ConfigServerMetrics.DELAYED_RESPONSES.count()); addMetric(metrics, ConfigServerMetrics.SESSION_CHANGE_ERRORS.count()); addMetric(metrics, ConfigServerMetrics.ZK_Z_NODES.last()); addMetric(metrics, ConfigServerMetrics.ZK_AVG_LATENCY.last()); addMetric(metrics, ConfigServerMetrics.ZK_MAX_LATENCY.last()); addMetric(metrics, ConfigServerMetrics.ZK_CONNECTIONS.last()); addMetric(metrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS.last()); addMetric(metrics, ConfigServerMetrics.CLUSTER_COST.last()); addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last()); addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last()); addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last()); return metrics; }
addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last());
private static Set<Metric> getConfigServerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ConfigServerMetrics.REQUESTS.count()); addMetric(metrics, ConfigServerMetrics.FAILED_REQUESTS.count()); addMetric(metrics, ConfigServerMetrics.LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, ConfigServerMetrics.CACHE_CONFIG_ELEMS.last()); addMetric(metrics, ConfigServerMetrics.CACHE_CHECKSUM_ELEMS.last()); addMetric(metrics, ConfigServerMetrics.HOSTS.last()); addMetric(metrics, ConfigServerMetrics.DELAYED_RESPONSES.count()); addMetric(metrics, ConfigServerMetrics.SESSION_CHANGE_ERRORS.count()); addMetric(metrics, ConfigServerMetrics.ZK_Z_NODES.last()); addMetric(metrics, ConfigServerMetrics.ZK_AVG_LATENCY.last()); addMetric(metrics, ConfigServerMetrics.ZK_MAX_LATENCY.last()); addMetric(metrics, ConfigServerMetrics.ZK_CONNECTIONS.last()); addMetric(metrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS.last()); addMetric(metrics, ConfigServerMetrics.CLUSTER_COST.last()); addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last()); addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last()); addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last()); return metrics; }
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metrics.addAll(getDistributorMetrics()); metrics.addAll(getDocprocMetrics()); metrics.addAll(getClusterControllerMetrics()); metrics.addAll(getSearchChainMetrics()); metrics.addAll(getContainerMetrics()); metrics.addAll(getConfigServerMetrics()); metrics.addAll(getSentinelMetrics()); metrics.addAll(getOtherMetrics()); return Collections.unmodifiableSet(metrics); } private static Set<Metric> getSentinelMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SentinelMetrics.SENTINEL_RESTARTS.count()); addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS.last()); addMetric(metrics, SentinelMetrics.SENTINEL_UPTIME.last()); addMetric(metrics, SentinelMetrics.SENTINEL_RUNNING, EnumSet.of(count, last)); return metrics; } private static Set<Metric> getOtherMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SlobrokMetrics.SLOBROK_HEARTBEATS_FAILED.count()); addMetric(metrics, SlobrokMetrics.SLOBROK_MISSING_CONSENSUS.count()); addMetric(metrics, LogdMetrics.LOGD_PROCESSED_LINES.count()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, 
StorageMetrics.VDS_SERVER_NETWORK_TLS_HANDSHAKES_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_PEER_AUTHORIZATION_FAILURES.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_TLS_CONNECTIONS_BROKEN.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_FAILED_TLS_CONFIG_RELOADS.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_FNET_NUM_CONNECTIONS.count()); addMetric(metrics, NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, RoutingLayerMetrics.WORKER_CONNECTIONS.max()); return metrics; } private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.APPLICATION_GENERATION.baseName()); addMetric(metrics, ContainerMetrics.HANDLED_REQUESTS.count()); addMetric(metrics, ContainerMetrics.HANDLED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.SERVER_NUM_OPEN_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_NUM_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_RECEIVED, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_SENT, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS, 
EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_REJECTED_TASKS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MAX_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MIN_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_RESERVED_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_BUSY_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_TOTAL_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.HTTPAPI_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_PENDING, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_OPERATIONS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_UPDATES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_REMOVES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_PUTS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_PARSE_ERROR.rate()); addMetric(metrics, 
ContainerMetrics.HTTPAPI_CONDITION_NOT_MET.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NOT_FOUND.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_INSUFFICIENT_STORAGE.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_TIMEOUT.rate()); addMetric(metrics, ContainerMetrics.MEM_HEAP_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_COUNT.max()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_USED.average()); addMetric(metrics, ContainerMetrics.JDISC_MEMORY_MAPPINGS.max()); addMetric(metrics, ContainerMetrics.JDISC_OPEN_FILE_DESCRIPTORS.max()); addMetric(metrics, ContainerMetrics.JDISC_GC_COUNT, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_GC_MS, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS.last()); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_WITH_RETAINED_REFS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, 
ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS.last()); addMetric(metrics, ContainerMetrics.CONTAINER_IAM_ROLE_EXPIRY_SECONDS.baseName()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_1XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_3XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_PREMATURELY_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_REQUESTS_PER_CONNECTION, EnumSet.of(sum, count, min, max, average)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_URI_LENGTH, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_CONTENT_SIZE, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_EXPIRED_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INVALID_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_CONNECTION_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_BLOCKED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_ALLOWED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_HANDLED.rate()); addMetric(metrics, 
// --- tail of getContainerMetrics(): method opens before this chunk; the line below completes an addMetric(...) call started there ---
ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_UNHANDLED.rate());
addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_HANDLED.rate());
addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_UNHANDLED.rate());
addMetric(metrics, ContainerMetrics.JDISC_HTTP_HANDLER_UNHANDLED_EXCEPTIONS.rate());
addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate());
addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_CREATION_TIME_MILLIS.last());
addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_RECONFIGURATIONS.rate());
addMetric(metrics, ContainerMetrics.JDISC_JVM.last());
addMetric(metrics, ContainerMetrics.SERVER_REJECTED_REQUESTS, EnumSet.of(rate, count));
addMetric(metrics, ContainerMetrics.SERVER_THREAD_POOL_SIZE, EnumSet.of(max, last));
addMetric(metrics, ContainerMetrics.SERVER_ACTIVE_THREADS, EnumSet.of(min, max, sum, count, last));
addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_SUCCEEDED.rate());
addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_FAILED.rate());
return metrics; }
/**
 * Metrics emitted by the cluster controller: per-state node counts (down/initializing/
 * maintenance/retired/stopping/up), state-change and node-event counters, tick/work
 * timing, and resource-usage limit/utilization metrics.
 * Insertion order is preserved via the LinkedHashSet.
 */
private static Set<Metric> getClusterControllerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT.last());
addMetric(metrics, ClusterControllerMetrics.INITIALIZING_COUNT.last());
addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT.last());
addMetric(metrics, ClusterControllerMetrics.RETIRED_COUNT.last());
addMetric(metrics, ClusterControllerMetrics.STOPPING_COUNT.last());
addMetric(metrics, ClusterControllerMetrics.UP_COUNT.last());
// baseName() registers the metric without an aggregation suffix.
addMetric(metrics, ClusterControllerMetrics.CLUSTER_STATE_CHANGE_COUNT.baseName());
addMetric(metrics, ClusterControllerMetrics.BUSY_TICK_TIME_MS, EnumSet.of(last, max, sum, count));
addMetric(metrics, ClusterControllerMetrics.IDLE_TICK_TIME_MS, EnumSet.of(last, max, sum, count));
addMetric(metrics, ClusterControllerMetrics.WORK_MS, EnumSet.of(last, sum, count));
addMetric(metrics, ClusterControllerMetrics.IS_MASTER.last());
addMetric(metrics, ClusterControllerMetrics.REMOTE_TASK_QUEUE_SIZE.last());
addMetric(metrics, ClusterControllerMetrics.NODE_EVENT_COUNT.baseName());
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT, EnumSet.of(last, max));
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION, EnumSet.of(last, max));
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION, EnumSet.of(last, max));
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MEMORY_LIMIT.last());
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_DISK_LIMIT.last());
addMetric(metrics, ClusterControllerMetrics.REINDEXING_PROGRESS.last());
return metrics; }
/** Document-processing (docproc) metrics; only the processed-documents rate, added by literal name. */
private static Set<Metric> getDocprocMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("documents_processed.rate"));
return metrics; }
/**
 * Container search-chain metrics: QPS, query/feed latency and timeout percentiles,
 * hit/document counts, render latency, relevance-at-N, and per-category error rates.
 * NOTE(review): SEARCH_CONNECTIONS is added twice with identical suffixes (here and
 * again below); the Set makes the duplicate harmless — confirm whether intentional.
 */
private static Set<Metric> getSearchChainMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, ContainerMetrics.PEAK_QPS.max());
addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max));
addMetric(metrics, ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count, max));
addMetric(metrics, ContainerMetrics.FEED_HTTP_REQUESTS, EnumSet.of(count, rate));
addMetric(metrics, ContainerMetrics.QUERIES.rate());
addMetric(metrics, ContainerMetrics.QUERY_CONTAINER_LATENCY, EnumSet.of(sum, count, max));
addMetric(metrics, ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
addMetric(metrics, ContainerMetrics.QUERY_TIMEOUT, EnumSet.of(sum, count, max, min, ninety_five_percentile, ninety_nine_percentile));
addMetric(metrics, ContainerMetrics.FAILED_QUERIES.rate());
addMetric(metrics, ContainerMetrics.DEGRADED_QUERIES.rate());
addMetric(metrics, ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max));
addMetric(metrics, ContainerMetrics.QUERY_HIT_OFFSET, EnumSet.of(sum, count, max));
addMetric(metrics, ContainerMetrics.DOCUMENTS_COVERED.count());
addMetric(metrics, ContainerMetrics.DOCUMENTS_TOTAL.count());
addMetric(metrics, ContainerMetrics.DOCUMENTS_TARGET_TOTAL.count());
addMetric(metrics, ContainerMetrics.JDISC_RENDER_LATENCY, EnumSet.of(min, max, count, sum, last, average));
addMetric(metrics, ContainerMetrics.QUERY_ITEM_COUNT, EnumSet.of(max, sum, count));
addMetric(metrics, ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
addMetric(metrics, ContainerMetrics.EMPTY_RESULTS.rate());
addMetric(metrics, ContainerMetrics.REQUESTS_OVER_QUOTA, EnumSet.of(rate, count));
addMetric(metrics, ContainerMetrics.DOCPROC_PROC_TIME, EnumSet.of(sum, count, max));
addMetric(metrics, ContainerMetrics.DOCPROC_DOCUMENTS, EnumSet.of(sum, count, max, min));
addMetric(metrics, ContainerMetrics.RELEVANCE_AT_1, EnumSet.of(sum, count));
addMetric(metrics, ContainerMetrics.RELEVANCE_AT_3, EnumSet.of(sum, count));
addMetric(metrics, ContainerMetrics.RELEVANCE_AT_10, EnumSet.of(sum, count));
// Error metrics, one rate per error category.
addMetric(metrics, ContainerMetrics.ERROR_TIMEOUT.rate());
addMetric(metrics, ContainerMetrics.ERROR_BACKENDS_OOS.rate());
addMetric(metrics, ContainerMetrics.ERROR_PLUGIN_FAILURE.rate());
addMetric(metrics, ContainerMetrics.ERROR_BACKEND_COMMUNICATION_ERROR.rate());
addMetric(metrics, ContainerMetrics.ERROR_EMPTY_DOCUMENT_SUMMARIES.rate());
addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_PARAMETER.rate());
addMetric(metrics, ContainerMetrics.ERROR_INTERNAL_SERVER_ERROR.rate());
addMetric(metrics, ContainerMetrics.ERROR_MISCONFIGURED_SERVER.rate());
addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_TRANSFORMATION.rate());
addMetric(metrics, ContainerMetrics.ERROR_RESULTS_WITH_ERRORS.rate());
addMetric(metrics, ContainerMetrics.ERROR_UNSPECIFIED.rate());
addMetric(metrics, ContainerMetrics.ERROR_UNHANDLED_EXCEPTION.rate());
return metrics; }
/**
 * Search-node (proton) metrics: config generation, per-documentdb document counts and
 * disk/memory usage, docsum and search-protocol request/reply sizes and latencies.
 * Continues across following chunks with executor, job, lid-space, resource-usage,
 * document-store, attribute and matching metrics.
 */
private static Set<Metric> getSearchNodeMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_CONFIG_GENERATION.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_REMOVED.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_DOCS_IN_MEMORY.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DISK_USAGE.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MEMORY_USAGE_ALLOCATED_BYTES.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_HEART_BEAT_AGE.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_DOCS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REPLY_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, average));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REPLY_SIZE, EnumSet.of(max, sum, count));
// Statement continues in the next chunk.
addMetric(metrics, 
// --- interior of getSearchNodeMetrics(): completes the addMetric(...) started in the previous chunk ---
SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.count());
// Per-executor metrics (proton/flush/match/docsum/shared/warmup/field-writer):
// queue size, accepted tasks, wakeups and utilization.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_UTILIZATION, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_UTILIZATION, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_UTILIZATION, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_UTILIZATION, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_UTILIZATION, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_UTILIZATION, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION, EnumSet.of(max, sum, count));
// Per-documentdb maintenance-job load averages.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_TOTAL.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_ATTRIBUTE_FLUSH.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_MEMORY_INDEX_FLUSH.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DISK_INDEX_FUSION.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_FLUSH.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_COMPACT.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_BUCKET_MOVE.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_LID_SPACE_COMPACT.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_REMOVED_DOCUMENTS_PRUNE.average());
// Per-documentdb threading-service (master/index/summary) metrics.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_UTILIZATION, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_UTILIZATION, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_ACCEPTED.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_WAKEUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_UTILIZATION, EnumSet.of(max, sum, count));
// Lid-space metrics for the ready / notready / removed sub-dbs.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_BLOAT_FACTOR.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_LIMIT.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_HIGHEST_USED_LID.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_USED_LIDS.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_BLOAT_FACTOR.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_LIMIT.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_HIGHEST_USED_LID.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_USED_LIDS.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_BLOAT_FACTOR.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_FRAGMENTATION_FACTOR.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_LIMIT.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_HIGHEST_USED_LID.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_USED_LIDS.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_BUCKET_MOVE_BUCKETS_PENDING.last());
// Node-level resource usage (disk/memory totals, utilization, transient usage, mappings, fds).
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL_UTILIZATION.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TRANSIENT.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL_UTILIZATION.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TRANSIENT.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_MAPPINGS.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_OPEN_FILE_DESCRIPTORS.max());
// --- interior/tail of getSearchNodeMetrics(): feeding-block flags, CPU breakdown,
// transaction log, per-sub-db document stores and caches, attributes, index, matching ---
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MALLOC_ARENA.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_ADDRESS_SPACE.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_FEEDING_BLOCKED.max());
// CPU utilization broken down by work category.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_SETUP, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_READ, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_WRITE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_COMPACT, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_OTHER, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_ENTRIES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_REPLAY_TIME.last());
// Document-store metrics for the ready sub-db.
// NOTE(review): unlike notready/removed below, no DEAD_BYTES metric is added for ready — confirm intentional.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_USAGE.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_BLOAT.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average());
// Document-store metrics for the notready sub-db.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_USAGE.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_BLOAT.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average());
// Document-store metrics for the removed sub-db.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_USAGE.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_BLOAT.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average());
// Document-store cache metrics (ready and notready).
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_HIT_RATE.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_LOOKUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_HIT_RATE.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_LOOKUPS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate());
// Attribute and index memory usage.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ALLOCATED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_USED_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_DEAD_BYTES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ONHOLD_BYTES.average());
// Matching metrics, overall and per rank profile, plus feeding commit metrics.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERIES.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_SOFT_DOOMED_QUERIES.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_SETUP_TIME, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED, EnumSet.of(rate, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERIES.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOMED_QUERIES.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOM_FACTOR, EnumSet.of(min, max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_GROUPING_TIME, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_DOCS_MATCHED, EnumSet.of(rate, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_LIMITED_QUERIES.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_OPERATIONS, EnumSet.of(max, sum, count, rate));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_LATENCY, EnumSet.of(max, sum, count));
return metrics; }
/**
 * Content-node storage metrics: datastored totals, visitor threads, filestor operation
 * counts/latencies/sizes, throttling, and the merge throttler.
 * Body continues in the next chunk.
 */
private static Set<Metric> getStorageMetrics() {
Set<Metric> metrics = new LinkedHashSet<>(); 
// --- body of getStorageMetrics(): datastored totals and visitor-thread metrics ---
addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BUCKETS.average());
addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_DOCS.average());
addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BYTES.average());
addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEVISITORLIFETIME, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_COMPLETED.rate());
addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_CREATED.rate());
addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_FAILED.rate());
addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEMESSAGESENDTIME, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEPROCESSINGTIME, EnumSet.of(max, sum, count));
// Filestor queueing, throttling and merge I/O latencies.
addMetric(metrics, StorageMetrics.VDS_FILESTOR_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WINDOW_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WAITING_THREADS, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_ACTIVE_TOKENS, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEMETADATAREADLATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAREADLATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAWRITELATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_RPC_DIRECT_DISPATCHES.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_PERSISTENCE_THREAD_POLLS.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_TIMEOUTS_WAITING_FOR_THROTTLE_TOKEN.rate());
// Per-operation (put/remove/get/update/iterator/visit/...) counts, failures, latencies and sizes.
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_FAILED.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_TEST_AND_SET_FAILED.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_FAILED.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_TEST_AND_SET_FAILED.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_FAILED.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_FAILED.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_TEST_AND_SET_FAILED.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_COUNT.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_FAILED.rate());
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_COUNT.rate());
// Merge-throttler queueing, window size and merge-chain outcomes.
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE.rate());
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK.rate());
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_OK.rate());
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUSY.rate());
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TOTAL.rate());
return metrics; }
/**
 * Distributor metrics: ideal-state bucket statistics and maintenance operations
 * (delete/merge/split/join/GC), then per-client-operation (put/remove/update/get/
 * visitor) latency, success and failure rates. Body continues in the next chunk.
 */
private static Set<Metric> getDistributorMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_RECHECKING.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_IDEALSTATE_DIFF.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOFEWCOPIES.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOMANYCOPIES.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_NOTRUSTED.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_MOVING_OUT.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_OUT.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_IN.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_SYNCING.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MAX_OBSERVED_TIME_SINCE_LAST_GC_SEC.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_FAILED.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_PENDING.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_FAILED.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_PENDING.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_BLOCKED.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_THROTTLED.rate());
// Statement continues in the next chunk.
addMetric(metrics, 
// --- tail of getDistributorMetrics(): completes the addMetric(...) started in the previous chunk ---
DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_CHANGED.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_BLOCKED.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_FAILED.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_FAILED.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_PENDING.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_FAILED.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_PENDING.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_FAILED.rate());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_PENDING.average());
addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DOCUMENTS_REMOVED, EnumSet.of(count, rate));
// Client puts: latency, success and per-cause failure rates.
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TOTAL.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTFOUND.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TEST_AND_SET_FAILED.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_CONCURRENT_MUTATIONS.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTCONNECTED.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTREADY.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_WRONGDISTRIBUTOR.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_SAFE_TIME_NOT_REACHED.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_STORAGEFAILURE.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TIMEOUT.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_BUSY.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_INCONSISTENT_BUCKET.rate());
// Client removes and updates.
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TOTAL.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTFOUND.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TEST_AND_SET_FAILED.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_CONCURRENT_MUTATIONS.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TOTAL.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_NOTFOUND.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TEST_AND_SET_FAILED.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_CONCURRENT_MUTATIONS.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_DIVERGING_TIMESTAMP_UPDATES.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TOTAL.rate());
// Client gets and visitors.
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_TOTAL.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_NOTFOUND.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_OK.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TOTAL.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTREADY.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTCONNECTED.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_WRONGDISTRIBUTOR.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_SAFE_TIME_NOT_REACHED.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_STORAGEFAILURE.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TIMEOUT.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_BUSY.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_INCONSISTENT_BUCKET.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTFOUND.rate());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average());
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_BYTESSTORED.average());
addMetric(metrics, DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count());
return metrics; }
/** Adds a metric by its full name (base name with aggregation suffix already appended). */
private static void addMetric(Set<Metric> metrics, String nameWithSuffix) {
metrics.add(new Metric(nameWithSuffix)); }
/** Adds one Metric per requested aggregation suffix, named {@code <baseName>.<suffix>}. */
private static void addMetric(Set<Metric> metrics, VespaMetrics metric, EnumSet<Suffix> suffixes) {
suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." + suffix.suffix()))); }
// Same as above, but for a raw metric name and plain string suffixes.
// NOTE(review): parameter name "aggregateSuffices" is a typo for "aggregateSuffixes" (private, no caller impact).
private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) {
for (String suffix : aggregateSuffices) {
metrics.add(new Metric(metricName + "." + suffix)); } } }
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metrics.addAll(getDistributorMetrics()); metrics.addAll(getDocprocMetrics()); metrics.addAll(getClusterControllerMetrics()); metrics.addAll(getSearchChainMetrics()); metrics.addAll(getContainerMetrics()); metrics.addAll(getConfigServerMetrics()); metrics.addAll(getSentinelMetrics()); metrics.addAll(getOtherMetrics()); return Collections.unmodifiableSet(metrics); } private static Set<Metric> getSentinelMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SentinelMetrics.SENTINEL_RESTARTS.count()); addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS.last()); addMetric(metrics, SentinelMetrics.SENTINEL_UPTIME.last()); addMetric(metrics, SentinelMetrics.SENTINEL_RUNNING, EnumSet.of(count, last)); return metrics; } private static Set<Metric> getOtherMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SlobrokMetrics.SLOBROK_HEARTBEATS_FAILED.count()); addMetric(metrics, SlobrokMetrics.SLOBROK_MISSING_CONSENSUS.count()); addMetric(metrics, LogdMetrics.LOGD_PROCESSED_LINES.count()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, 
StorageMetrics.VDS_SERVER_NETWORK_TLS_HANDSHAKES_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_PEER_AUTHORIZATION_FAILURES.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_TLS_CONNECTIONS_BROKEN.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_FAILED_TLS_CONFIG_RELOADS.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_FNET_NUM_CONNECTIONS.count()); addMetric(metrics, NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, RoutingLayerMetrics.WORKER_CONNECTIONS.max()); return metrics; } private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.APPLICATION_GENERATION.baseName()); addMetric(metrics, ContainerMetrics.HANDLED_REQUESTS.count()); addMetric(metrics, ContainerMetrics.HANDLED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.SERVER_NUM_OPEN_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_NUM_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_RECEIVED, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_SENT, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS, 
EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_REJECTED_TASKS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MAX_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MIN_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_RESERVED_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_BUSY_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_TOTAL_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.HTTPAPI_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_PENDING, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_OPERATIONS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_UPDATES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_REMOVES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_PUTS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_PARSE_ERROR.rate()); addMetric(metrics, 
ContainerMetrics.HTTPAPI_CONDITION_NOT_MET.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NOT_FOUND.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_INSUFFICIENT_STORAGE.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_TIMEOUT.rate()); addMetric(metrics, ContainerMetrics.MEM_HEAP_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_COUNT.max()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_USED.average()); addMetric(metrics, ContainerMetrics.JDISC_MEMORY_MAPPINGS.max()); addMetric(metrics, ContainerMetrics.JDISC_OPEN_FILE_DESCRIPTORS.max()); addMetric(metrics, ContainerMetrics.JDISC_GC_COUNT, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_GC_MS, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS.last()); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_WITH_RETAINED_REFS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, 
ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS.last()); addMetric(metrics, ContainerMetrics.CONTAINER_IAM_ROLE_EXPIRY_SECONDS.baseName()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_1XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_3XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_PREMATURELY_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_REQUESTS_PER_CONNECTION, EnumSet.of(sum, count, min, max, average)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_URI_LENGTH, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_CONTENT_SIZE, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_EXPIRED_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INVALID_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_CONNECTION_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_BLOCKED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_ALLOWED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_HANDLED.rate()); addMetric(metrics, 
ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_HANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_HANDLER_UNHANDLED_EXCEPTIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_CREATION_TIME_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_RECONFIGURATIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_JVM.last()); addMetric(metrics, ContainerMetrics.SERVER_REJECTED_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.SERVER_THREAD_POOL_SIZE, EnumSet.of(max, last)); addMetric(metrics, ContainerMetrics.SERVER_ACTIVE_THREADS, EnumSet.of(min, max, sum, count, last)); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_FAILED.rate()); return metrics; } private static Set<Metric> getClusterControllerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.INITIALIZING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.RETIRED_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.STOPPING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.UP_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.CLUSTER_STATE_CHANGE_COUNT.baseName()); addMetric(metrics, ClusterControllerMetrics.BUSY_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.IDLE_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.WORK_MS, EnumSet.of(last, sum, count)); 
addMetric(metrics, ClusterControllerMetrics.IS_MASTER.last()); addMetric(metrics, ClusterControllerMetrics.REMOTE_TASK_QUEUE_SIZE.last()); addMetric(metrics, ClusterControllerMetrics.NODE_EVENT_COUNT.baseName()); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MEMORY_LIMIT.last()); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_DISK_LIMIT.last()); addMetric(metrics, ClusterControllerMetrics.REINDEXING_PROGRESS.last()); return metrics; } private static Set<Metric> getDocprocMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("documents_processed.rate")); return metrics; } private static Set<Metric> getSearchChainMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.PEAK_QPS.max()); addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.FEED_HTTP_REQUESTS, EnumSet.of(count, rate)); addMetric(metrics, ContainerMetrics.QUERIES.rate()); addMetric(metrics, ContainerMetrics.QUERY_CONTAINER_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.QUERY_TIMEOUT, EnumSet.of(sum, count, max, min, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.FAILED_QUERIES.rate()); addMetric(metrics, ContainerMetrics.DEGRADED_QUERIES.rate()); addMetric(metrics, ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, 
ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.QUERY_HIT_OFFSET, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.DOCUMENTS_COVERED.count()); addMetric(metrics, ContainerMetrics.DOCUMENTS_TOTAL.count()); addMetric(metrics, ContainerMetrics.DOCUMENTS_TARGET_TOTAL.count()); addMetric(metrics, ContainerMetrics.JDISC_RENDER_LATENCY, EnumSet.of(min, max, count, sum, last, average)); addMetric(metrics, ContainerMetrics.QUERY_ITEM_COUNT, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.EMPTY_RESULTS.rate()); addMetric(metrics, ContainerMetrics.REQUESTS_OVER_QUOTA, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.DOCPROC_PROC_TIME, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.DOCPROC_DOCUMENTS, EnumSet.of(sum, count, max, min)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_1, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_3, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_10, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.ERROR_TIMEOUT.rate()); addMetric(metrics, ContainerMetrics.ERROR_BACKENDS_OOS.rate()); addMetric(metrics, ContainerMetrics.ERROR_PLUGIN_FAILURE.rate()); addMetric(metrics, ContainerMetrics.ERROR_BACKEND_COMMUNICATION_ERROR.rate()); addMetric(metrics, ContainerMetrics.ERROR_EMPTY_DOCUMENT_SUMMARIES.rate()); addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_PARAMETER.rate()); addMetric(metrics, ContainerMetrics.ERROR_INTERNAL_SERVER_ERROR.rate()); addMetric(metrics, ContainerMetrics.ERROR_MISCONFIGURED_SERVER.rate()); addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_TRANSFORMATION.rate()); addMetric(metrics, ContainerMetrics.ERROR_RESULTS_WITH_ERRORS.rate()); 
addMetric(metrics, ContainerMetrics.ERROR_UNSPECIFIED.rate()); addMetric(metrics, ContainerMetrics.ERROR_UNHANDLED_EXCEPTION.rate()); return metrics; } private static Set<Metric> getSearchNodeMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_CONFIG_GENERATION.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_REMOVED.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_DOCS_IN_MEMORY.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DISK_USAGE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MEMORY_USAGE_ALLOCATED_BYTES.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_HEART_BEAT_AGE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_DOCS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REPLY_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, average)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REPLY_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.count()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_WAKEUPS.rate()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_TOTAL.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_ATTRIBUTE_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_MEMORY_INDEX_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DISK_INDEX_FUSION.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_COMPACT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_BUCKET_MOVE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_LID_SPACE_COMPACT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_REMOVED_DOCUMENTS_PRUNE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_BUCKET_MOVE_BUCKETS_PENDING.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL_UTILIZATION.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TRANSIENT.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL_UTILIZATION.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TRANSIENT.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_MAPPINGS.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_OPEN_FILE_DESCRIPTORS.max()); 
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MALLOC_ARENA.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_ADDRESS_SPACE.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_FEEDING_BLOCKED.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_SETUP, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_READ, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_WRITE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_COMPACT, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_OTHER, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_ENTRIES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_REPLAY_TIME.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_HIT_RATE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_LOOKUPS.rate()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_HIT_RATE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_LOOKUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_SOFT_DOOMED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_SETUP_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED, EnumSet.of(rate, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOMED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOM_FACTOR, EnumSet.of(min, max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_GROUPING_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_DOCS_MATCHED, EnumSet.of(rate, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_LIMITED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_OPERATIONS, EnumSet.of(max, sum, count, rate)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_LATENCY, EnumSet.of(max, sum, count)); return metrics; } private static Set<Metric> getStorageMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); 
addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BUCKETS.average()); addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_DOCS.average()); addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BYTES.average()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEVISITORLIFETIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_COMPLETED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_CREATED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEMESSAGESENDTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEPROCESSINGTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WINDOW_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WAITING_THREADS, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_ACTIVE_TOKENS, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEMETADATAREADLATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAREADLATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAWRITELATENCY, EnumSet.of(max, 
sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_RPC_DIRECT_DISPATCHES.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_PERSISTENCE_THREAD_POLLS.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_TIMEOUTS_WAITING_FOR_THROTTLE_TOKEN.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_FAILED.rate()); addMetric(metrics, 
StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_OK.rate()); addMetric(metrics, 
StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUSY.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TOTAL.rate()); return metrics; } private static Set<Metric> getDistributorMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_RECHECKING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_IDEALSTATE_DIFF.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOFEWCOPIES.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOMANYCOPIES.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_NOTRUSTED.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_MOVING_OUT.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_OUT.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_IN.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_SYNCING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MAX_OBSERVED_TIME_SINCE_LAST_GC_SEC.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_BLOCKED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_THROTTLED.rate()); addMetric(metrics, 
DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_CHANGED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_BLOCKED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DOCUMENTS_REMOVED, EnumSet.of(count, rate)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTCONNECTED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTREADY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_WRONGDISTRIBUTOR.rate()); 
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_SAFE_TIME_NOT_REACHED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_STORAGEFAILURE.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TIMEOUT.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_BUSY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_INCONSISTENT_BUCKET.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_DIVERGING_TIMESTAMP_UPDATES.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, 
DistributorMetrics.VDS_DISTRIBUTOR_GETS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTREADY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTCONNECTED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_WRONGDISTRIBUTOR.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_SAFE_TIME_NOT_REACHED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_STORAGEFAILURE.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TIMEOUT.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_BUSY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_INCONSISTENT_BUCKET.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_BYTESSTORED.average()); addMetric(metrics, DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count()); return metrics; } private static void addMetric(Set<Metric> metrics, String nameWithSuffix) { metrics.add(new Metric(nameWithSuffix)); } private static void addMetric(Set<Metric> metrics, VespaMetrics metric, EnumSet<Suffix> suffixes) { suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." 
+ suffix.suffix()))); } private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) { for (String suffix : aggregateSuffices) { metrics.add(new Metric(metricName + "." + suffix)); } } }
Other config server metrics are already included in this set, so why are these (CLUSTER_COST and the CLUSTER_LOAD_IDEAL_* metrics) not included as well?
/**
 * Builds the set of config server metrics included in the Vespa metric set:
 * request handling, config/checksum caches, ZooKeeper health, and cluster
 * cost/ideal-load gauges.
 *
 * @return the config server metrics, in insertion order (LinkedHashSet)
 */
private static Set<Metric> getConfigServerMetrics() {
    Set<Metric> configMetrics = new LinkedHashSet<>();

    // Request handling.
    addMetric(configMetrics, ConfigServerMetrics.REQUESTS.count());
    addMetric(configMetrics, ConfigServerMetrics.FAILED_REQUESTS.count());
    addMetric(configMetrics, ConfigServerMetrics.LATENCY, EnumSet.of(max, sum, count));

    // Config cache and host gauges, reported as last observed value.
    addMetric(configMetrics, ConfigServerMetrics.CACHE_CONFIG_ELEMS.last());
    addMetric(configMetrics, ConfigServerMetrics.CACHE_CHECKSUM_ELEMS.last());
    addMetric(configMetrics, ConfigServerMetrics.HOSTS.last());
    addMetric(configMetrics, ConfigServerMetrics.DELAYED_RESPONSES.count());
    addMetric(configMetrics, ConfigServerMetrics.SESSION_CHANGE_ERRORS.count());

    // ZooKeeper ensemble health.
    addMetric(configMetrics, ConfigServerMetrics.ZK_Z_NODES.last());
    addMetric(configMetrics, ConfigServerMetrics.ZK_AVG_LATENCY.last());
    addMetric(configMetrics, ConfigServerMetrics.ZK_MAX_LATENCY.last());
    addMetric(configMetrics, ConfigServerMetrics.ZK_CONNECTIONS.last());
    addMetric(configMetrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS.last());

    // Cluster cost and ideal load.
    addMetric(configMetrics, ConfigServerMetrics.CLUSTER_COST.last());
    addMetric(configMetrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last());
    addMetric(configMetrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last());
    addMetric(configMetrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last());

    return configMetrics;
}
addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last());
/**
 * Returns the config server metrics included in the Vespa metric set.
 * Order is preserved by the LinkedHashSet, so gauges are grouped and
 * iterated in the same order the original registered them.
 *
 * NOTE(review): getConfigServerMetrics is defined twice in this file with
 * identical contents — confirm which copy is intended and remove the other.
 *
 * @return the config server metrics
 */
private static Set<Metric> getConfigServerMetrics() {
    Set<Metric> result = new LinkedHashSet<>();

    // Request handling.
    addMetric(result, ConfigServerMetrics.REQUESTS.count());
    addMetric(result, ConfigServerMetrics.FAILED_REQUESTS.count());
    addMetric(result, ConfigServerMetrics.LATENCY, EnumSet.of(max, sum, count));

    // Config cache and host gauges, all reported as last observed value.
    for (var gauge : new ConfigServerMetrics[] { ConfigServerMetrics.CACHE_CONFIG_ELEMS,
                                                 ConfigServerMetrics.CACHE_CHECKSUM_ELEMS,
                                                 ConfigServerMetrics.HOSTS }) {
        addMetric(result, gauge.last());
    }

    addMetric(result, ConfigServerMetrics.DELAYED_RESPONSES.count());
    addMetric(result, ConfigServerMetrics.SESSION_CHANGE_ERRORS.count());

    // ZooKeeper health plus cluster cost/ideal-load gauges, also last-value.
    for (var gauge : new ConfigServerMetrics[] { ConfigServerMetrics.ZK_Z_NODES,
                                                 ConfigServerMetrics.ZK_AVG_LATENCY,
                                                 ConfigServerMetrics.ZK_MAX_LATENCY,
                                                 ConfigServerMetrics.ZK_CONNECTIONS,
                                                 ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS,
                                                 ConfigServerMetrics.CLUSTER_COST,
                                                 ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU,
                                                 ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY,
                                                 ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK }) {
        addMetric(result, gauge.last());
    }

    return result;
}
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metrics.addAll(getDistributorMetrics()); metrics.addAll(getDocprocMetrics()); metrics.addAll(getClusterControllerMetrics()); metrics.addAll(getSearchChainMetrics()); metrics.addAll(getContainerMetrics()); metrics.addAll(getConfigServerMetrics()); metrics.addAll(getSentinelMetrics()); metrics.addAll(getOtherMetrics()); return Collections.unmodifiableSet(metrics); } private static Set<Metric> getSentinelMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SentinelMetrics.SENTINEL_RESTARTS.count()); addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS.last()); addMetric(metrics, SentinelMetrics.SENTINEL_UPTIME.last()); addMetric(metrics, SentinelMetrics.SENTINEL_RUNNING, EnumSet.of(count, last)); return metrics; } private static Set<Metric> getOtherMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SlobrokMetrics.SLOBROK_HEARTBEATS_FAILED.count()); addMetric(metrics, SlobrokMetrics.SLOBROK_MISSING_CONSENSUS.count()); addMetric(metrics, LogdMetrics.LOGD_PROCESSED_LINES.count()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, 
StorageMetrics.VDS_SERVER_NETWORK_TLS_HANDSHAKES_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_PEER_AUTHORIZATION_FAILURES.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_TLS_CONNECTIONS_BROKEN.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_FAILED_TLS_CONFIG_RELOADS.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_FNET_NUM_CONNECTIONS.count()); addMetric(metrics, NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, RoutingLayerMetrics.WORKER_CONNECTIONS.max()); return metrics; } private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.APPLICATION_GENERATION.baseName()); addMetric(metrics, ContainerMetrics.HANDLED_REQUESTS.count()); addMetric(metrics, ContainerMetrics.HANDLED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.SERVER_NUM_OPEN_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_NUM_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_RECEIVED, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_SENT, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS, 
EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_REJECTED_TASKS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MAX_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MIN_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_RESERVED_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_BUSY_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_TOTAL_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.HTTPAPI_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_PENDING, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_OPERATIONS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_UPDATES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_REMOVES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_PUTS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_PARSE_ERROR.rate()); addMetric(metrics, 
ContainerMetrics.HTTPAPI_CONDITION_NOT_MET.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NOT_FOUND.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_INSUFFICIENT_STORAGE.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_TIMEOUT.rate()); addMetric(metrics, ContainerMetrics.MEM_HEAP_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_COUNT.max()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_USED.average()); addMetric(metrics, ContainerMetrics.JDISC_MEMORY_MAPPINGS.max()); addMetric(metrics, ContainerMetrics.JDISC_OPEN_FILE_DESCRIPTORS.max()); addMetric(metrics, ContainerMetrics.JDISC_GC_COUNT, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_GC_MS, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS.last()); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_WITH_RETAINED_REFS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, 
ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS.last()); addMetric(metrics, ContainerMetrics.CONTAINER_IAM_ROLE_EXPIRY_SECONDS.baseName()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_1XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_3XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_PREMATURELY_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_REQUESTS_PER_CONNECTION, EnumSet.of(sum, count, min, max, average)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_URI_LENGTH, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_CONTENT_SIZE, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_EXPIRED_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INVALID_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_CONNECTION_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_BLOCKED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_ALLOWED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_HANDLED.rate()); addMetric(metrics, 
ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_HANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_HANDLER_UNHANDLED_EXCEPTIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_CREATION_TIME_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_RECONFIGURATIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_JVM.last()); addMetric(metrics, ContainerMetrics.SERVER_REJECTED_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.SERVER_THREAD_POOL_SIZE, EnumSet.of(max, last)); addMetric(metrics, ContainerMetrics.SERVER_ACTIVE_THREADS, EnumSet.of(min, max, sum, count, last)); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_FAILED.rate()); return metrics; } private static Set<Metric> getClusterControllerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.INITIALIZING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.RETIRED_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.STOPPING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.UP_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.CLUSTER_STATE_CHANGE_COUNT.baseName()); addMetric(metrics, ClusterControllerMetrics.BUSY_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.IDLE_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.WORK_MS, EnumSet.of(last, sum, count)); 
// --- Tail of cluster-controller metric registrations (method signature is above this chunk) ---
addMetric(metrics, ClusterControllerMetrics.IS_MASTER.last());
addMetric(metrics, ClusterControllerMetrics.REMOTE_TASK_QUEUE_SIZE.last());
addMetric(metrics, ClusterControllerMetrics.NODE_EVENT_COUNT.baseName());
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT, EnumSet.of(last, max));
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION, EnumSet.of(last, max));
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION, EnumSet.of(last, max));
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MEMORY_LIMIT.last());
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_DISK_LIMIT.last());
addMetric(metrics, ClusterControllerMetrics.REINDEXING_PROGRESS.last());
return metrics;
}

/** Returns the document-processing metrics; the single entry is added by raw name rather than via a metrics enum. */
private static Set<Metric> getDocprocMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    metrics.add(new Metric("documents_processed.rate"));
    return metrics;
}

/**
 * Returns the container search-chain metrics: query/feed traffic, latency, hit counts,
 * docproc timing, relevance, and the per-cause error rates.
 * Insertion order is preserved (LinkedHashSet).
 */
private static Set<Metric> getSearchChainMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    addMetric(metrics, ContainerMetrics.PEAK_QPS.max());
    addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max));
    addMetric(metrics, ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count, max));
    addMetric(metrics, ContainerMetrics.FEED_HTTP_REQUESTS, EnumSet.of(count, rate));
    addMetric(metrics, ContainerMetrics.QUERIES.rate());
    addMetric(metrics, ContainerMetrics.QUERY_CONTAINER_LATENCY, EnumSet.of(sum, count, max));
    addMetric(metrics, ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
    addMetric(metrics, ContainerMetrics.QUERY_TIMEOUT, EnumSet.of(sum, count, max, min, ninety_five_percentile, ninety_nine_percentile));
    addMetric(metrics, ContainerMetrics.FAILED_QUERIES.rate());
    addMetric(metrics, ContainerMetrics.DEGRADED_QUERIES.rate());
    addMetric(metrics, ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
    // NOTE(review): SEARCH_CONNECTIONS with the same suffix set is already registered above;
    // this repeat is redundant if Metric equality dedupes entries in the set — confirm and consider removing.
    addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max));
    addMetric(metrics, ContainerMetrics.QUERY_HIT_OFFSET, EnumSet.of(sum, count, max));
    addMetric(metrics, ContainerMetrics.DOCUMENTS_COVERED.count());
    addMetric(metrics, ContainerMetrics.DOCUMENTS_TOTAL.count());
    addMetric(metrics, ContainerMetrics.DOCUMENTS_TARGET_TOTAL.count());
    addMetric(metrics, ContainerMetrics.JDISC_RENDER_LATENCY, EnumSet.of(min, max, count, sum, last, average));
    addMetric(metrics, ContainerMetrics.QUERY_ITEM_COUNT, EnumSet.of(max, sum, count));
    addMetric(metrics, ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
    addMetric(metrics, ContainerMetrics.EMPTY_RESULTS.rate());
    addMetric(metrics, ContainerMetrics.REQUESTS_OVER_QUOTA, EnumSet.of(rate, count));
    addMetric(metrics, ContainerMetrics.DOCPROC_PROC_TIME, EnumSet.of(sum, count, max));
    addMetric(metrics, ContainerMetrics.DOCPROC_DOCUMENTS, EnumSet.of(sum, count, max, min));
    // Relevance at cut-offs 1/3/10.
    addMetric(metrics, ContainerMetrics.RELEVANCE_AT_1, EnumSet.of(sum, count));
    addMetric(metrics, ContainerMetrics.RELEVANCE_AT_3, EnumSet.of(sum, count));
    addMetric(metrics, ContainerMetrics.RELEVANCE_AT_10, EnumSet.of(sum, count));
    // Per-cause query error rates.
    addMetric(metrics, ContainerMetrics.ERROR_TIMEOUT.rate());
    addMetric(metrics, ContainerMetrics.ERROR_BACKENDS_OOS.rate());
    addMetric(metrics, ContainerMetrics.ERROR_PLUGIN_FAILURE.rate());
    addMetric(metrics, ContainerMetrics.ERROR_BACKEND_COMMUNICATION_ERROR.rate());
    addMetric(metrics, ContainerMetrics.ERROR_EMPTY_DOCUMENT_SUMMARIES.rate());
    addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_PARAMETER.rate());
    addMetric(metrics, ContainerMetrics.ERROR_INTERNAL_SERVER_ERROR.rate());
    addMetric(metrics, ContainerMetrics.ERROR_MISCONFIGURED_SERVER.rate());
    addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_TRANSFORMATION.rate());
    addMetric(metrics, ContainerMetrics.ERROR_RESULTS_WITH_ERRORS.rate());
    addMetric(metrics, ContainerMetrics.ERROR_UNSPECIFIED.rate());
    addMetric(metrics, ContainerMetrics.ERROR_UNHANDLED_EXCEPTION.rate());
    return metrics;
}

/**
 * Returns the content/search-node (proton) metrics: document counts, executors, flush/match jobs,
 * lid space, resource usage, document store, attributes, index, matching, and feeding.
 * Insertion order is preserved (LinkedHashSet).
 */
private static Set<Metric> getSearchNodeMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_CONFIG_GENERATION.last());
    // Document counts per documentdb.
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_REMOVED.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_DOCS_IN_MEMORY.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DISK_USAGE.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MEMORY_USAGE_ALLOCATED_BYTES.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_HEART_BEAT_AGE.last());
    // Docsum fetching and the search protocol (query + docsum RPC sizes/latency).
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_DOCS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REQUEST_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REPLY_SIZE, EnumSet.of(max, sum, count));
    // NOTE(review): docsum latency uses `average` where its siblings use `count` — confirm intentional.
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, average));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUEST_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REPLY_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.count());
    // Executors: queuesize/accepted/wakeups/utilization per pool (proton, flush, match, docsum, shared, warmup, field writer).
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_UTILIZATION, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_UTILIZATION, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_UTILIZATION, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_UTILIZATION, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_UTILIZATION, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_UTILIZATION, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION, EnumSet.of(max, sum, count));
    // Background job load per documentdb.
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_TOTAL.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_ATTRIBUTE_FLUSH.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_MEMORY_INDEX_FLUSH.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DISK_INDEX_FUSION.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_FLUSH.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_COMPACT.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_BUCKET_MOVE.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_LID_SPACE_COMPACT.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_REMOVED_DOCUMENTS_PRUNE.average());
    // Per-documentdb threading service executors (master, index, summary).
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_UTILIZATION, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_UTILIZATION, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_ACCEPTED.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_WAKEUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_UTILIZATION, EnumSet.of(max, sum, count));
    // Lid space for the READY, NOTREADY, and REMOVED sub-dbs.
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_BLOAT_FACTOR.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_LIMIT.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_HIGHEST_USED_LID.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_USED_LIDS.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_BLOAT_FACTOR.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_LIMIT.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_HIGHEST_USED_LID.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_USED_LIDS.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_BLOAT_FACTOR.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_FRAGMENTATION_FACTOR.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_LIMIT.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_HIGHEST_USED_LID.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_USED_LIDS.last());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_BUCKET_MOVE_BUCKETS_PENDING.last());
    // Node-level resource usage (disk, memory, mappings, fds, feed blocking, CPU categories).
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL_UTILIZATION.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TRANSIENT.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL_UTILIZATION.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TRANSIENT.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_MAPPINGS.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_OPEN_FILE_DESCRIPTORS.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MALLOC_ARENA.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_ADDRESS_SPACE.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_FEEDING_BLOCKED.max());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_SETUP, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_READ, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_WRITE, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_COMPACT, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_OTHER, EnumSet.of(max, sum, count));
    // Transaction log.
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_ENTRIES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_REPLAY_TIME.last());
    // Document store usage per sub-db.
    // NOTE(review): READY registers no MEMORY_USAGE_DEAD_BYTES, unlike NOTREADY/REMOVED — confirm intentional.
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_USAGE.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_BLOAT.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_USAGE.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_BLOAT.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_USAGE.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_BLOAT.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average());
    // Document store cache for READY and NOTREADY.
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_HIT_RATE.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_LOOKUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_HIT_RATE.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_LOOKUPS.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate());
    // Attribute memory usage per sub-db, and index memory usage.
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ALLOCATED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_USED_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_DEAD_BYTES.average());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ONHOLD_BYTES.average());
    // Matching (overall and per rank profile) and feeding.
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERIES.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_SOFT_DOOMED_QUERIES.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_SETUP_TIME, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED, EnumSet.of(rate, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERIES.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOMED_QUERIES.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOM_FACTOR, EnumSet.of(min, max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_GROUPING_TIME, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(max, sum, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_DOCS_MATCHED, EnumSet.of(rate, count));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_LIMITED_QUERIES.rate());
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_OPERATIONS, EnumSet.of(max, sum, count, rate));
    addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_LATENCY, EnumSet.of(max, sum, count));
    return metrics;
}

/**
 * Returns the storage-node (VDS) metrics: data stored, visitors, filestor operations,
 * throttling, and the merge throttler. Insertion order is preserved (LinkedHashSet).
 */
private static Set<Metric> getStorageMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BUCKETS.average());
    addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_DOCS.average());
    addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BYTES.average());
    // Visitor threads.
    addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEVISITORLIFETIME, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_COMPLETED.rate());
    addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_CREATED.rate());
    addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_FAILED.rate());
    addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEMESSAGESENDTIME, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEPROCESSINGTIME, EnumSet.of(max, sum, count));
    // Filestor queue/throttle state.
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WINDOW_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WAITING_THREADS, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_ACTIVE_TOKENS, EnumSet.of(max, sum, count));
    // Merge-related filestor latencies.
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEMETADATAREADLATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAREADLATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAWRITELATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_RPC_DIRECT_DISPATCHES.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_PERSISTENCE_THREAD_POLLS.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_TIMEOUTS_WAITING_FOR_THROTTLE_TOKEN.rate());
    // Per-operation filestor counters/latencies (put, remove, get, update, iterators, visits, bucket ops).
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_FAILED.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_TEST_AND_SET_FAILED.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_REQUEST_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_FAILED.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_TEST_AND_SET_FAILED.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_REQUEST_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_FAILED.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_REQUEST_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_FAILED.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_TEST_AND_SET_FAILED.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_REQUEST_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_COUNT.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_FAILED.rate());
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_COUNT.rate());
    // Merge throttler.
    addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_QUEUESIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE, EnumSet.of(max, sum, count));
    addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE.rate());
    addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK.rate());
    addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_OK.rate());
    addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUSY.rate());
    addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TOTAL.rate());
    return metrics;
}

/**
 * Returns the distributor metrics: ideal-state maintenance, per-operation outcomes
 * (puts, removes, updates, gets, visitors) with per-cause failure rates, and stored totals.
 * Insertion order is preserved (LinkedHashSet).
 */
private static Set<Metric> getDistributorMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    // Ideal-state / maintenance state.
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_RECHECKING.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_IDEALSTATE_DIFF.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOFEWCOPIES.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOMANYCOPIES.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_NOTRUSTED.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_MOVING_OUT.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_OUT.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_IN.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_SYNCING.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MAX_OBSERVED_TIME_SINCE_LAST_GC_SEC.average());
    // Maintenance operations: delete/merge/split/join bucket and GC.
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_FAILED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_PENDING.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_FAILED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_PENDING.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_BLOCKED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_THROTTLED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_CHANGED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_BLOCKED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_FAILED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_FAILED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_PENDING.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_FAILED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_PENDING.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_FAILED.rate());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_PENDING.average());
    addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DOCUMENTS_REMOVED, EnumSet.of(count, rate));
    // Client operations: puts.
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TOTAL.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTFOUND.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TEST_AND_SET_FAILED.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_CONCURRENT_MUTATIONS.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTCONNECTED.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTREADY.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_WRONGDISTRIBUTOR.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_SAFE_TIME_NOT_REACHED.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_STORAGEFAILURE.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TIMEOUT.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_BUSY.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_INCONSISTENT_BUCKET.rate());
    // Client operations: removes.
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TOTAL.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTFOUND.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TEST_AND_SET_FAILED.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_CONCURRENT_MUTATIONS.rate());
    // Client operations: updates.
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TOTAL.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_NOTFOUND.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TEST_AND_SET_FAILED.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_CONCURRENT_MUTATIONS.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_DIVERGING_TIMESTAMP_UPDATES.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TOTAL.rate());
    // Client operations: gets.
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_TOTAL.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_NOTFOUND.rate());
    // Client operations: visitors.
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_LATENCY, EnumSet.of(max, sum, count));
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_OK.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TOTAL.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTREADY.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTCONNECTED.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_WRONGDISTRIBUTOR.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_SAFE_TIME_NOT_REACHED.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_STORAGEFAILURE.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TIMEOUT.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_BUSY.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_INCONSISTENT_BUCKET.rate());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTFOUND.rate());
    // Stored totals and clock-skew aborts.
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average());
    addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_BYTESSTORED.average());
    addMetric(metrics, DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count());
    return metrics;
}

/** Adds a single metric given its full name (base name plus suffix already joined). */
private static void addMetric(Set<Metric> metrics, String nameWithSuffix) {
    metrics.add(new Metric(nameWithSuffix));
}

/** Adds one metric per requested suffix, named {@code <baseName>.<suffix>}. */
private static void addMetric(Set<Metric> metrics, VespaMetrics metric, EnumSet<Suffix> suffixes) {
    suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." + suffix.suffix())));
}

/** Adds one metric per aggregate suffix string, named {@code <metricName>.<suffix>}. */
private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) {
    for (String suffix : aggregateSuffices) {
        metrics.add(new Metric(metricName + "." + suffix));
    }
}
}
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metrics.addAll(getDistributorMetrics()); metrics.addAll(getDocprocMetrics()); metrics.addAll(getClusterControllerMetrics()); metrics.addAll(getSearchChainMetrics()); metrics.addAll(getContainerMetrics()); metrics.addAll(getConfigServerMetrics()); metrics.addAll(getSentinelMetrics()); metrics.addAll(getOtherMetrics()); return Collections.unmodifiableSet(metrics); } private static Set<Metric> getSentinelMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SentinelMetrics.SENTINEL_RESTARTS.count()); addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS.last()); addMetric(metrics, SentinelMetrics.SENTINEL_UPTIME.last()); addMetric(metrics, SentinelMetrics.SENTINEL_RUNNING, EnumSet.of(count, last)); return metrics; } private static Set<Metric> getOtherMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SlobrokMetrics.SLOBROK_HEARTBEATS_FAILED.count()); addMetric(metrics, SlobrokMetrics.SLOBROK_MISSING_CONSENSUS.count()); addMetric(metrics, LogdMetrics.LOGD_PROCESSED_LINES.count()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, 
StorageMetrics.VDS_SERVER_NETWORK_TLS_HANDSHAKES_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_PEER_AUTHORIZATION_FAILURES.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_TLS_CONNECTIONS_BROKEN.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_FAILED_TLS_CONFIG_RELOADS.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_FNET_NUM_CONNECTIONS.count()); addMetric(metrics, NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, RoutingLayerMetrics.WORKER_CONNECTIONS.max()); return metrics; } private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.APPLICATION_GENERATION.baseName()); addMetric(metrics, ContainerMetrics.HANDLED_REQUESTS.count()); addMetric(metrics, ContainerMetrics.HANDLED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.SERVER_NUM_OPEN_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_NUM_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_RECEIVED, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_SENT, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS, 
EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_REJECTED_TASKS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MAX_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MIN_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_RESERVED_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_BUSY_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_TOTAL_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.HTTPAPI_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_PENDING, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_OPERATIONS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_UPDATES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_REMOVES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_PUTS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_PARSE_ERROR.rate()); addMetric(metrics, 
ContainerMetrics.HTTPAPI_CONDITION_NOT_MET.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NOT_FOUND.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_INSUFFICIENT_STORAGE.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_TIMEOUT.rate()); addMetric(metrics, ContainerMetrics.MEM_HEAP_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_COUNT.max()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_USED.average()); addMetric(metrics, ContainerMetrics.JDISC_MEMORY_MAPPINGS.max()); addMetric(metrics, ContainerMetrics.JDISC_OPEN_FILE_DESCRIPTORS.max()); addMetric(metrics, ContainerMetrics.JDISC_GC_COUNT, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_GC_MS, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS.last()); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_WITH_RETAINED_REFS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, 
ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS.last()); addMetric(metrics, ContainerMetrics.CONTAINER_IAM_ROLE_EXPIRY_SECONDS.baseName()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_1XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_3XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_PREMATURELY_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_REQUESTS_PER_CONNECTION, EnumSet.of(sum, count, min, max, average)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_URI_LENGTH, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_CONTENT_SIZE, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_EXPIRED_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INVALID_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_CONNECTION_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_BLOCKED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_ALLOWED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_HANDLED.rate()); addMetric(metrics, 
ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_HANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_HANDLER_UNHANDLED_EXCEPTIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_CREATION_TIME_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_RECONFIGURATIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_JVM.last()); addMetric(metrics, ContainerMetrics.SERVER_REJECTED_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.SERVER_THREAD_POOL_SIZE, EnumSet.of(max, last)); addMetric(metrics, ContainerMetrics.SERVER_ACTIVE_THREADS, EnumSet.of(min, max, sum, count, last)); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_FAILED.rate()); return metrics; } private static Set<Metric> getClusterControllerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.INITIALIZING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.RETIRED_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.STOPPING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.UP_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.CLUSTER_STATE_CHANGE_COUNT.baseName()); addMetric(metrics, ClusterControllerMetrics.BUSY_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.IDLE_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.WORK_MS, EnumSet.of(last, sum, count)); 
addMetric(metrics, ClusterControllerMetrics.IS_MASTER.last()); addMetric(metrics, ClusterControllerMetrics.REMOTE_TASK_QUEUE_SIZE.last()); addMetric(metrics, ClusterControllerMetrics.NODE_EVENT_COUNT.baseName()); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MEMORY_LIMIT.last()); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_DISK_LIMIT.last()); addMetric(metrics, ClusterControllerMetrics.REINDEXING_PROGRESS.last()); return metrics; } private static Set<Metric> getDocprocMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("documents_processed.rate")); return metrics; } private static Set<Metric> getSearchChainMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.PEAK_QPS.max()); addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.FEED_HTTP_REQUESTS, EnumSet.of(count, rate)); addMetric(metrics, ContainerMetrics.QUERIES.rate()); addMetric(metrics, ContainerMetrics.QUERY_CONTAINER_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.QUERY_TIMEOUT, EnumSet.of(sum, count, max, min, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.FAILED_QUERIES.rate()); addMetric(metrics, ContainerMetrics.DEGRADED_QUERIES.rate()); addMetric(metrics, ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, 
ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.QUERY_HIT_OFFSET, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.DOCUMENTS_COVERED.count()); addMetric(metrics, ContainerMetrics.DOCUMENTS_TOTAL.count()); addMetric(metrics, ContainerMetrics.DOCUMENTS_TARGET_TOTAL.count()); addMetric(metrics, ContainerMetrics.JDISC_RENDER_LATENCY, EnumSet.of(min, max, count, sum, last, average)); addMetric(metrics, ContainerMetrics.QUERY_ITEM_COUNT, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.EMPTY_RESULTS.rate()); addMetric(metrics, ContainerMetrics.REQUESTS_OVER_QUOTA, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.DOCPROC_PROC_TIME, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.DOCPROC_DOCUMENTS, EnumSet.of(sum, count, max, min)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_1, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_3, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_10, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.ERROR_TIMEOUT.rate()); addMetric(metrics, ContainerMetrics.ERROR_BACKENDS_OOS.rate()); addMetric(metrics, ContainerMetrics.ERROR_PLUGIN_FAILURE.rate()); addMetric(metrics, ContainerMetrics.ERROR_BACKEND_COMMUNICATION_ERROR.rate()); addMetric(metrics, ContainerMetrics.ERROR_EMPTY_DOCUMENT_SUMMARIES.rate()); addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_PARAMETER.rate()); addMetric(metrics, ContainerMetrics.ERROR_INTERNAL_SERVER_ERROR.rate()); addMetric(metrics, ContainerMetrics.ERROR_MISCONFIGURED_SERVER.rate()); addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_TRANSFORMATION.rate()); addMetric(metrics, ContainerMetrics.ERROR_RESULTS_WITH_ERRORS.rate()); 
addMetric(metrics, ContainerMetrics.ERROR_UNSPECIFIED.rate()); addMetric(metrics, ContainerMetrics.ERROR_UNHANDLED_EXCEPTION.rate()); return metrics; } private static Set<Metric> getSearchNodeMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_CONFIG_GENERATION.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_REMOVED.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_DOCS_IN_MEMORY.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DISK_USAGE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MEMORY_USAGE_ALLOCATED_BYTES.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_HEART_BEAT_AGE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_DOCS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REPLY_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, average)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REPLY_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.count()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_WAKEUPS.rate()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_TOTAL.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_ATTRIBUTE_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_MEMORY_INDEX_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DISK_INDEX_FUSION.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_COMPACT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_BUCKET_MOVE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_LID_SPACE_COMPACT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_REMOVED_DOCUMENTS_PRUNE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_BUCKET_MOVE_BUCKETS_PENDING.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL_UTILIZATION.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TRANSIENT.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL_UTILIZATION.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TRANSIENT.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_MAPPINGS.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_OPEN_FILE_DESCRIPTORS.max()); 
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MALLOC_ARENA.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_ADDRESS_SPACE.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_FEEDING_BLOCKED.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_SETUP, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_READ, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_WRITE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_COMPACT, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_OTHER, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_ENTRIES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_REPLAY_TIME.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_HIT_RATE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_LOOKUPS.rate()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_HIT_RATE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_LOOKUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_SOFT_DOOMED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_SETUP_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED, EnumSet.of(rate, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOMED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOM_FACTOR, EnumSet.of(min, max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_GROUPING_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_DOCS_MATCHED, EnumSet.of(rate, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_LIMITED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_OPERATIONS, EnumSet.of(max, sum, count, rate)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_LATENCY, EnumSet.of(max, sum, count)); return metrics; } private static Set<Metric> getStorageMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); 
addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BUCKETS.average()); addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_DOCS.average()); addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BYTES.average()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEVISITORLIFETIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_COMPLETED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_CREATED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEMESSAGESENDTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEPROCESSINGTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WINDOW_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WAITING_THREADS, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_ACTIVE_TOKENS, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEMETADATAREADLATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAREADLATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAWRITELATENCY, EnumSet.of(max, 
sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_RPC_DIRECT_DISPATCHES.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_PERSISTENCE_THREAD_POLLS.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_TIMEOUTS_WAITING_FOR_THROTTLE_TOKEN.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_FAILED.rate()); addMetric(metrics, 
StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_OK.rate()); addMetric(metrics, 
StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUSY.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TOTAL.rate()); return metrics; } private static Set<Metric> getDistributorMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_RECHECKING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_IDEALSTATE_DIFF.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOFEWCOPIES.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOMANYCOPIES.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_NOTRUSTED.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_MOVING_OUT.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_OUT.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_IN.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_SYNCING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MAX_OBSERVED_TIME_SINCE_LAST_GC_SEC.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_BLOCKED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_THROTTLED.rate()); addMetric(metrics, 
DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_CHANGED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_BLOCKED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DOCUMENTS_REMOVED, EnumSet.of(count, rate)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTCONNECTED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTREADY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_WRONGDISTRIBUTOR.rate()); 
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_SAFE_TIME_NOT_REACHED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_STORAGEFAILURE.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TIMEOUT.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_BUSY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_INCONSISTENT_BUCKET.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_DIVERGING_TIMESTAMP_UPDATES.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, 
DistributorMetrics.VDS_DISTRIBUTOR_GETS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTREADY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTCONNECTED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_WRONGDISTRIBUTOR.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_SAFE_TIME_NOT_REACHED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_STORAGEFAILURE.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TIMEOUT.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_BUSY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_INCONSISTENT_BUCKET.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_BYTESSTORED.average()); addMetric(metrics, DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count()); return metrics; } private static void addMetric(Set<Metric> metrics, String nameWithSuffix) { metrics.add(new Metric(nameWithSuffix)); } private static void addMetric(Set<Metric> metrics, VespaMetrics metric, EnumSet<Suffix> suffixes) { suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." 
+ suffix.suffix()))); } private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) { for (String suffix : aggregateSuffices) { metrics.add(new Metric(metricName + "." + suffix)); } } }
These sets are only allow-lists for the metrics-proxy, so adding metrics here is currently a no-op. However, the proxy could start processing them at some point in the future.
// Config server metrics added to the metrics-proxy allow-list.
private static Set<Metric> getConfigServerMetrics() {
    Set<Metric> configServerMetrics = new LinkedHashSet<>();

    // Request counters first, then latency with several aggregate suffixes.
    addMetric(configServerMetrics, ConfigServerMetrics.REQUESTS.count());
    addMetric(configServerMetrics, ConfigServerMetrics.FAILED_REQUESTS.count());
    addMetric(configServerMetrics, ConfigServerMetrics.LATENCY, EnumSet.of(max, sum, count));

    // The remaining metrics each contribute a single pre-suffixed name.
    // Declaration order below matches the original insertion order exactly,
    // which LinkedHashSet preserves.
    String[] suffixedNames = {
            ConfigServerMetrics.CACHE_CONFIG_ELEMS.last(),
            ConfigServerMetrics.CACHE_CHECKSUM_ELEMS.last(),
            ConfigServerMetrics.HOSTS.last(),
            ConfigServerMetrics.DELAYED_RESPONSES.count(),
            ConfigServerMetrics.SESSION_CHANGE_ERRORS.count(),
            ConfigServerMetrics.ZK_Z_NODES.last(),
            ConfigServerMetrics.ZK_AVG_LATENCY.last(),
            ConfigServerMetrics.ZK_MAX_LATENCY.last(),
            ConfigServerMetrics.ZK_CONNECTIONS.last(),
            ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS.last(),
            ConfigServerMetrics.CLUSTER_COST.last(),
            ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last(),
            ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last(),
            ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last()
    };
    for (String suffixedName : suffixedNames) {
        addMetric(configServerMetrics, suffixedName);
    }
    return configServerMetrics;
}
addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last());
// Metrics emitted by the config server, added to the metrics-proxy allow-list.
// NOTE(review): a LinkedHashSet is used, so the insertion order below is
// preserved — presumably intentional for stable output ordering; confirm
// before reordering any entries.
private static Set<Metric> getConfigServerMetrics() {
    Set<Metric> metrics = new LinkedHashSet<>();
    // Request handling: counters plus latency with max/sum/count aggregates.
    addMetric(metrics, ConfigServerMetrics.REQUESTS.count());
    addMetric(metrics, ConfigServerMetrics.FAILED_REQUESTS.count());
    addMetric(metrics, ConfigServerMetrics.LATENCY, EnumSet.of(max, sum, count));
    // Config cache, host and session bookkeeping.
    addMetric(metrics, ConfigServerMetrics.CACHE_CONFIG_ELEMS.last());
    addMetric(metrics, ConfigServerMetrics.CACHE_CHECKSUM_ELEMS.last());
    addMetric(metrics, ConfigServerMetrics.HOSTS.last());
    addMetric(metrics, ConfigServerMetrics.DELAYED_RESPONSES.count());
    addMetric(metrics, ConfigServerMetrics.SESSION_CHANGE_ERRORS.count());
    // ZooKeeper state gauges.
    addMetric(metrics, ConfigServerMetrics.ZK_Z_NODES.last());
    addMetric(metrics, ConfigServerMetrics.ZK_AVG_LATENCY.last());
    addMetric(metrics, ConfigServerMetrics.ZK_MAX_LATENCY.last());
    addMetric(metrics, ConfigServerMetrics.ZK_CONNECTIONS.last());
    addMetric(metrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS.last());
    // Cluster cost and ideal-load gauges.
    addMetric(metrics, ConfigServerMetrics.CLUSTER_COST.last());
    addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last());
    addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last());
    addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last());
    return metrics;
}
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metrics.addAll(getDistributorMetrics()); metrics.addAll(getDocprocMetrics()); metrics.addAll(getClusterControllerMetrics()); metrics.addAll(getSearchChainMetrics()); metrics.addAll(getContainerMetrics()); metrics.addAll(getConfigServerMetrics()); metrics.addAll(getSentinelMetrics()); metrics.addAll(getOtherMetrics()); return Collections.unmodifiableSet(metrics); } private static Set<Metric> getSentinelMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SentinelMetrics.SENTINEL_RESTARTS.count()); addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS.last()); addMetric(metrics, SentinelMetrics.SENTINEL_UPTIME.last()); addMetric(metrics, SentinelMetrics.SENTINEL_RUNNING, EnumSet.of(count, last)); return metrics; } private static Set<Metric> getOtherMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SlobrokMetrics.SLOBROK_HEARTBEATS_FAILED.count()); addMetric(metrics, SlobrokMetrics.SLOBROK_MISSING_CONSENSUS.count()); addMetric(metrics, LogdMetrics.LOGD_PROCESSED_LINES.count()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, 
StorageMetrics.VDS_SERVER_NETWORK_TLS_HANDSHAKES_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_PEER_AUTHORIZATION_FAILURES.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_TLS_CONNECTIONS_BROKEN.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_FAILED_TLS_CONFIG_RELOADS.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_FNET_NUM_CONNECTIONS.count()); addMetric(metrics, NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, RoutingLayerMetrics.WORKER_CONNECTIONS.max()); return metrics; } private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.APPLICATION_GENERATION.baseName()); addMetric(metrics, ContainerMetrics.HANDLED_REQUESTS.count()); addMetric(metrics, ContainerMetrics.HANDLED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.SERVER_NUM_OPEN_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_NUM_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_RECEIVED, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_SENT, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS, 
EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_REJECTED_TASKS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MAX_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MIN_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_RESERVED_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_BUSY_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_TOTAL_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.HTTPAPI_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_PENDING, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_OPERATIONS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_UPDATES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_REMOVES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_PUTS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_PARSE_ERROR.rate()); addMetric(metrics, 
ContainerMetrics.HTTPAPI_CONDITION_NOT_MET.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NOT_FOUND.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_INSUFFICIENT_STORAGE.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_TIMEOUT.rate()); addMetric(metrics, ContainerMetrics.MEM_HEAP_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_COUNT.max()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_USED.average()); addMetric(metrics, ContainerMetrics.JDISC_MEMORY_MAPPINGS.max()); addMetric(metrics, ContainerMetrics.JDISC_OPEN_FILE_DESCRIPTORS.max()); addMetric(metrics, ContainerMetrics.JDISC_GC_COUNT, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_GC_MS, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS.last()); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_WITH_RETAINED_REFS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, 
ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS.last()); addMetric(metrics, ContainerMetrics.CONTAINER_IAM_ROLE_EXPIRY_SECONDS.baseName()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_1XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_3XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_PREMATURELY_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_REQUESTS_PER_CONNECTION, EnumSet.of(sum, count, min, max, average)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_URI_LENGTH, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_CONTENT_SIZE, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_EXPIRED_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INVALID_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_CONNECTION_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_BLOCKED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_ALLOWED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_HANDLED.rate()); addMetric(metrics, 
ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_HANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_HANDLER_UNHANDLED_EXCEPTIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_CREATION_TIME_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_RECONFIGURATIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_JVM.last()); addMetric(metrics, ContainerMetrics.SERVER_REJECTED_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.SERVER_THREAD_POOL_SIZE, EnumSet.of(max, last)); addMetric(metrics, ContainerMetrics.SERVER_ACTIVE_THREADS, EnumSet.of(min, max, sum, count, last)); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_FAILED.rate()); return metrics; } private static Set<Metric> getClusterControllerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.INITIALIZING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.RETIRED_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.STOPPING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.UP_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.CLUSTER_STATE_CHANGE_COUNT.baseName()); addMetric(metrics, ClusterControllerMetrics.BUSY_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.IDLE_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.WORK_MS, EnumSet.of(last, sum, count)); 
addMetric(metrics, ClusterControllerMetrics.IS_MASTER.last()); addMetric(metrics, ClusterControllerMetrics.REMOTE_TASK_QUEUE_SIZE.last()); addMetric(metrics, ClusterControllerMetrics.NODE_EVENT_COUNT.baseName()); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MEMORY_LIMIT.last()); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_DISK_LIMIT.last()); addMetric(metrics, ClusterControllerMetrics.REINDEXING_PROGRESS.last()); return metrics; } private static Set<Metric> getDocprocMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("documents_processed.rate")); return metrics; } private static Set<Metric> getSearchChainMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.PEAK_QPS.max()); addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.FEED_HTTP_REQUESTS, EnumSet.of(count, rate)); addMetric(metrics, ContainerMetrics.QUERIES.rate()); addMetric(metrics, ContainerMetrics.QUERY_CONTAINER_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.QUERY_TIMEOUT, EnumSet.of(sum, count, max, min, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.FAILED_QUERIES.rate()); addMetric(metrics, ContainerMetrics.DEGRADED_QUERIES.rate()); addMetric(metrics, ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, 
ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.QUERY_HIT_OFFSET, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.DOCUMENTS_COVERED.count()); addMetric(metrics, ContainerMetrics.DOCUMENTS_TOTAL.count()); addMetric(metrics, ContainerMetrics.DOCUMENTS_TARGET_TOTAL.count()); addMetric(metrics, ContainerMetrics.JDISC_RENDER_LATENCY, EnumSet.of(min, max, count, sum, last, average)); addMetric(metrics, ContainerMetrics.QUERY_ITEM_COUNT, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.EMPTY_RESULTS.rate()); addMetric(metrics, ContainerMetrics.REQUESTS_OVER_QUOTA, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.DOCPROC_PROC_TIME, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.DOCPROC_DOCUMENTS, EnumSet.of(sum, count, max, min)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_1, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_3, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_10, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.ERROR_TIMEOUT.rate()); addMetric(metrics, ContainerMetrics.ERROR_BACKENDS_OOS.rate()); addMetric(metrics, ContainerMetrics.ERROR_PLUGIN_FAILURE.rate()); addMetric(metrics, ContainerMetrics.ERROR_BACKEND_COMMUNICATION_ERROR.rate()); addMetric(metrics, ContainerMetrics.ERROR_EMPTY_DOCUMENT_SUMMARIES.rate()); addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_PARAMETER.rate()); addMetric(metrics, ContainerMetrics.ERROR_INTERNAL_SERVER_ERROR.rate()); addMetric(metrics, ContainerMetrics.ERROR_MISCONFIGURED_SERVER.rate()); addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_TRANSFORMATION.rate()); addMetric(metrics, ContainerMetrics.ERROR_RESULTS_WITH_ERRORS.rate()); 
addMetric(metrics, ContainerMetrics.ERROR_UNSPECIFIED.rate()); addMetric(metrics, ContainerMetrics.ERROR_UNHANDLED_EXCEPTION.rate()); return metrics; } private static Set<Metric> getSearchNodeMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_CONFIG_GENERATION.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_REMOVED.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_DOCS_IN_MEMORY.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DISK_USAGE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MEMORY_USAGE_ALLOCATED_BYTES.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_HEART_BEAT_AGE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_DOCS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REPLY_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, average)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REPLY_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.count()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_WAKEUPS.rate()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_TOTAL.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_ATTRIBUTE_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_MEMORY_INDEX_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DISK_INDEX_FUSION.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_COMPACT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_BUCKET_MOVE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_LID_SPACE_COMPACT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_REMOVED_DOCUMENTS_PRUNE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_BUCKET_MOVE_BUCKETS_PENDING.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL_UTILIZATION.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TRANSIENT.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL_UTILIZATION.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TRANSIENT.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_MAPPINGS.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_OPEN_FILE_DESCRIPTORS.max()); 
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MALLOC_ARENA.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_ADDRESS_SPACE.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_FEEDING_BLOCKED.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_SETUP, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_READ, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_WRITE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_COMPACT, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_OTHER, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_ENTRIES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_REPLAY_TIME.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_HIT_RATE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_LOOKUPS.rate()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_HIT_RATE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_LOOKUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_SOFT_DOOMED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_SETUP_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED, EnumSet.of(rate, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOMED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOM_FACTOR, EnumSet.of(min, max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_GROUPING_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_DOCS_MATCHED, EnumSet.of(rate, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_LIMITED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_OPERATIONS, EnumSet.of(max, sum, count, rate)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_LATENCY, EnumSet.of(max, sum, count)); return metrics; } private static Set<Metric> getStorageMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); 
addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BUCKETS.average()); addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_DOCS.average()); addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BYTES.average()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEVISITORLIFETIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_COMPLETED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_CREATED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEMESSAGESENDTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEPROCESSINGTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WINDOW_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WAITING_THREADS, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_ACTIVE_TOKENS, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEMETADATAREADLATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAREADLATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAWRITELATENCY, EnumSet.of(max, 
sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_RPC_DIRECT_DISPATCHES.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_PERSISTENCE_THREAD_POLLS.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_TIMEOUTS_WAITING_FOR_THROTTLE_TOKEN.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_FAILED.rate()); addMetric(metrics, 
StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_OK.rate()); addMetric(metrics, 
StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUSY.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TOTAL.rate()); return metrics; } private static Set<Metric> getDistributorMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_RECHECKING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_IDEALSTATE_DIFF.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOFEWCOPIES.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOMANYCOPIES.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_NOTRUSTED.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_MOVING_OUT.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_OUT.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_IN.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_SYNCING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MAX_OBSERVED_TIME_SINCE_LAST_GC_SEC.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_BLOCKED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_THROTTLED.rate()); addMetric(metrics, 
DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_CHANGED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_BLOCKED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DOCUMENTS_REMOVED, EnumSet.of(count, rate)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTCONNECTED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTREADY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_WRONGDISTRIBUTOR.rate()); 
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_SAFE_TIME_NOT_REACHED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_STORAGEFAILURE.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TIMEOUT.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_BUSY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_INCONSISTENT_BUCKET.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_DIVERGING_TIMESTAMP_UPDATES.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, 
DistributorMetrics.VDS_DISTRIBUTOR_GETS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTREADY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTCONNECTED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_WRONGDISTRIBUTOR.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_SAFE_TIME_NOT_REACHED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_STORAGEFAILURE.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TIMEOUT.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_BUSY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_INCONSISTENT_BUCKET.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_BYTESSTORED.average()); addMetric(metrics, DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count()); return metrics; } private static void addMetric(Set<Metric> metrics, String nameWithSuffix) { metrics.add(new Metric(nameWithSuffix)); } private static void addMetric(Set<Metric> metrics, VespaMetrics metric, EnumSet<Suffix> suffixes) { suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." 
+ suffix.suffix()))); } private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) { for (String suffix : aggregateSuffices) { metrics.add(new Metric(metricName + "." + suffix)); } } }
// The "vespa" metric set: all metrics emitted for Vespa's own services, built on top of
// the default metric set. Each getXxxMetrics() method below contributes one service's metrics.
class VespaMetricSet {

// The complete metric set; includes defaultVespaMetricSet as a child set.
public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet));

// Union of the per-service metric collections, in insertion order, as an unmodifiable set.
private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metrics.addAll(getDistributorMetrics()); metrics.addAll(getDocprocMetrics()); metrics.addAll(getClusterControllerMetrics()); metrics.addAll(getSearchChainMetrics()); metrics.addAll(getContainerMetrics()); metrics.addAll(getConfigServerMetrics()); metrics.addAll(getSentinelMetrics()); metrics.addAll(getOtherMetrics()); return Collections.unmodifiableSet(metrics); }

// Metrics from config-sentinel (process supervisor): restarts, uptime and running state.
private static Set<Metric> getSentinelMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SentinelMetrics.SENTINEL_RESTARTS.count()); addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS.last()); addMetric(metrics, SentinelMetrics.SENTINEL_UPTIME.last()); addMetric(metrics, SentinelMetrics.SENTINEL_RUNNING, EnumSet.of(count, last)); return metrics; }

// Cross-cutting metrics: slobrok, logd, jrt transport TLS, storage-server network/TLS health,
// certificate expiry and routing-layer connections.
// NOTE(review): enum constant JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED looks
// misspelled ("CONNECIONTS"); renaming would have to happen in the enum, not here.
private static Set<Metric> getOtherMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SlobrokMetrics.SLOBROK_HEARTBEATS_FAILED.count()); addMetric(metrics, SlobrokMetrics.SLOBROK_MISSING_CONSENSUS.count()); addMetric(metrics, LogdMetrics.LOGD_PROCESSED_LINES.count()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_TLS_HANDSHAKES_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_PEER_AUTHORIZATION_FAILURES.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_TLS_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_INSECURE_CONNECTIONS_ESTABLISHED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_TLS_CONNECTIONS_BROKEN.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_FAILED_TLS_CONFIG_RELOADS.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED.count()); addMetric(metrics, StorageMetrics.VDS_SERVER_FNET_NUM_CONNECTIONS.count()); addMetric(metrics, NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName()); addMetric(metrics, RoutingLayerMetrics.WORKER_CONNECTIONS.max()); return metrics; }

// Container (jdisc) metrics: request handling, connections, thread pools, HTTP server,
// JVM memory/GC, TLS and request filtering. (Method body continues on the following lines.)
private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.APPLICATION_GENERATION.baseName()); addMetric(metrics, ContainerMetrics.HANDLED_REQUESTS.count()); addMetric(metrics, ContainerMetrics.HANDLED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.SERVER_NUM_OPEN_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_NUM_CONNECTIONS, EnumSet.of(max, last, average)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_RECEIVED, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.SERVER_BYTES_SENT, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS, 
EnumSet.of(sum, count, last, min, max));
// jdisc and Jetty thread pool metrics: queue capacity/size, rejected tasks, thread counts.
addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_REJECTED_TASKS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MAX_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MIN_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_RESERVED_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_BUSY_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_TOTAL_THREADS, EnumSet.of(sum, count, last, min, max)); addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_QUEUE_SIZE, EnumSet.of(sum, count, last, min, max));
// HTTP API operation metrics: latency, pending operations, and per-outcome rates.
addMetric(metrics, ContainerMetrics.HTTPAPI_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_PENDING, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_OPERATIONS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_UPDATES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_REMOVES.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_PUTS.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_PARSE_ERROR.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_CONDITION_NOT_MET.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_NOT_FOUND.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_INSUFFICIENT_STORAGE.rate()); addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_TIMEOUT.rate());
// JVM memory (heap/direct/native), GC, and container/singleton lifecycle metrics.
addMetric(metrics, ContainerMetrics.MEM_HEAP_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_HEAP_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_DIRECT_USED, EnumSet.of(average, max)); addMetric(metrics, ContainerMetrics.MEM_DIRECT_COUNT.max()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_TOTAL.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_FREE.average()); addMetric(metrics, ContainerMetrics.MEM_NATIVE_USED.average()); addMetric(metrics, ContainerMetrics.JDISC_MEMORY_MAPPINGS.max()); addMetric(metrics, ContainerMetrics.JDISC_OPEN_FILE_DESCRIPTORS.max()); addMetric(metrics, ContainerMetrics.JDISC_GC_COUNT, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_GC_MS, EnumSet.of(average, max, last)); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS.last()); addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_WITH_RETAINED_REFS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_COUNT.last()); addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_FAILURE_COUNT.last()); addMetric(metrics, 
ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_MILLIS.last());
// Certificate/credential expiry metrics.
addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS.last()); addMetric(metrics, ContainerMetrics.CONTAINER_IAM_ROLE_EXPIRY_SECONDS.baseName());
// HTTP status classes, jdisc HTTP server metrics and per-cause TLS handshake failures.
// NOTE(review): enum constant ..._INCOMPATIBLE_CHIFERS looks misspelled ("ciphers");
// renaming would have to happen in the enum, not here.
addMetric(metrics, ContainerMetrics.HTTP_STATUS_1XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_3XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.rate()); addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_PREMATURELY_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_REQUESTS_PER_CONNECTION, EnumSet.of(sum, count, min, max, average)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_URI_LENGTH, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_CONTENT_SIZE, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_EXPIRED_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INVALID_CLIENT_CERT.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_CONNECTION_CLOSED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_BLOCKED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_ALLOWED_REQUESTS.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_HANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_HANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_UNHANDLED.rate()); addMetric(metrics, ContainerMetrics.JDISC_HTTP_HANDLER_UNHANDLED_EXCEPTIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_CREATION_TIME_MILLIS.last()); addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_RECONFIGURATIONS.rate()); addMetric(metrics, ContainerMetrics.JDISC_JVM.last()); addMetric(metrics, ContainerMetrics.SERVER_REJECTED_REQUESTS, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.SERVER_THREAD_POOL_SIZE, EnumSet.of(max, last)); addMetric(metrics, ContainerMetrics.SERVER_ACTIVE_THREADS, EnumSet.of(min, max, sum, count, last)); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_SUCCEEDED.rate()); addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_FAILED.rate()); return metrics; }

// Cluster controller metrics: per-state node counts, cluster state changes, tick timing,
// master status and resource-usage limit tracking. (Continues on the following lines.)
private static Set<Metric> getClusterControllerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.INITIALIZING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.RETIRED_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.STOPPING_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.UP_COUNT.last()); addMetric(metrics, ClusterControllerMetrics.CLUSTER_STATE_CHANGE_COUNT.baseName()); addMetric(metrics, ClusterControllerMetrics.BUSY_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.IDLE_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); addMetric(metrics, ClusterControllerMetrics.WORK_MS, EnumSet.of(last, sum, count)); 
addMetric(metrics, ClusterControllerMetrics.IS_MASTER.last()); addMetric(metrics, ClusterControllerMetrics.REMOTE_TASK_QUEUE_SIZE.last()); addMetric(metrics, ClusterControllerMetrics.NODE_EVENT_COUNT.baseName()); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION, EnumSet.of(last, max)); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MEMORY_LIMIT.last()); addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_DISK_LIMIT.last()); addMetric(metrics, ClusterControllerMetrics.REINDEXING_PROGRESS.last()); return metrics; }

// Document processing metrics.
// NOTE(review): uses a raw metric name rather than a *Metrics enum constant — presumably
// no constant exists for this metric; confirm before converting.
private static Set<Metric> getDocprocMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("documents_processed.rate")); return metrics; }

// Search chain (container query/feed path) metrics: QPS, latencies, hit counts, coverage
// and per-cause query error rates.
private static Set<Metric> getSearchChainMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, ContainerMetrics.PEAK_QPS.max()); addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.FEED_HTTP_REQUESTS, EnumSet.of(count, rate)); addMetric(metrics, ContainerMetrics.QUERIES.rate()); addMetric(metrics, ContainerMetrics.QUERY_CONTAINER_LATENCY, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.QUERY_TIMEOUT, EnumSet.of(sum, count, max, min, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.FAILED_QUERIES.rate()); addMetric(metrics, ContainerMetrics.DEGRADED_QUERIES.rate()); addMetric(metrics, ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
// NOTE(review): SEARCH_CONNECTIONS is added a second time here with the same suffixes as
// above; harmless since metrics is a Set, but the duplicate could be removed.
addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.QUERY_HIT_OFFSET, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.DOCUMENTS_COVERED.count()); addMetric(metrics, ContainerMetrics.DOCUMENTS_TOTAL.count()); addMetric(metrics, ContainerMetrics.DOCUMENTS_TARGET_TOTAL.count()); addMetric(metrics, ContainerMetrics.JDISC_RENDER_LATENCY, EnumSet.of(min, max, count, sum, last, average)); addMetric(metrics, ContainerMetrics.QUERY_ITEM_COUNT, EnumSet.of(max, sum, count)); addMetric(metrics, ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile)); addMetric(metrics, ContainerMetrics.EMPTY_RESULTS.rate()); addMetric(metrics, ContainerMetrics.REQUESTS_OVER_QUOTA, EnumSet.of(rate, count)); addMetric(metrics, ContainerMetrics.DOCPROC_PROC_TIME, EnumSet.of(sum, count, max)); addMetric(metrics, ContainerMetrics.DOCPROC_DOCUMENTS, EnumSet.of(sum, count, max, min)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_1, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_3, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.RELEVANCE_AT_10, EnumSet.of(sum, count)); addMetric(metrics, ContainerMetrics.ERROR_TIMEOUT.rate()); addMetric(metrics, ContainerMetrics.ERROR_BACKENDS_OOS.rate()); addMetric(metrics, ContainerMetrics.ERROR_PLUGIN_FAILURE.rate()); addMetric(metrics, ContainerMetrics.ERROR_BACKEND_COMMUNICATION_ERROR.rate()); addMetric(metrics, ContainerMetrics.ERROR_EMPTY_DOCUMENT_SUMMARIES.rate()); addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_PARAMETER.rate()); addMetric(metrics, ContainerMetrics.ERROR_INTERNAL_SERVER_ERROR.rate()); addMetric(metrics, ContainerMetrics.ERROR_MISCONFIGURED_SERVER.rate()); addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_TRANSFORMATION.rate()); addMetric(metrics, ContainerMetrics.ERROR_RESULTS_WITH_ERRORS.rate()); 
addMetric(metrics, ContainerMetrics.ERROR_UNSPECIFIED.rate()); addMetric(metrics, ContainerMetrics.ERROR_UNHANDLED_EXCEPTION.rate()); return metrics; }

// Search node (proton) metrics: document counts, search protocol, executors, maintenance
// jobs, lid space, resource usage, stores and matching. (Continues on the following lines.)
private static Set<Metric> getSearchNodeMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_CONFIG_GENERATION.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_REMOVED.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_DOCS_IN_MEMORY.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DISK_USAGE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MEMORY_USAGE_ALLOCATED_BYTES.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_HEART_BEAT_AGE.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_DOCS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_LATENCY, EnumSet.of(max, sum, count));
// Search protocol query/docsum latency and payload sizes.
// NOTE(review): SEARCH_PROTOCOL_DOCSUM_LATENCY uses 'average' where its sibling protocol
// metrics use 'count' — confirm this is intentional.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REPLY_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, average)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REPLY_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.count());
// Proton executor/thread-pool metrics: queue size, accepted tasks, wakeups, utilization.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_WAKEUPS.rate()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION, EnumSet.of(max, sum, count));
// Document DB maintenance job load metrics.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_TOTAL.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_ATTRIBUTE_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_MEMORY_INDEX_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DISK_INDEX_FUSION.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_FLUSH.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_COMPACT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_BUCKET_MOVE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_LID_SPACE_COMPACT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_REMOVED_DOCUMENTS_PRUNE.average());
// Per-document-db threading service metrics (master/index/summary executors).
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_UTILIZATION, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_UTILIZATION, EnumSet.of(max, sum, count));
// Local document-id (lid) space metrics per sub-db (ready/notready/removed).
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_BLOAT_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_FRAGMENTATION_FACTOR.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_LIMIT.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_HIGHEST_USED_LID.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_USED_LIDS.last()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_BUCKET_MOVE_BUCKETS_PENDING.last());
// Proton resource usage: disk, memory, mappings, file descriptors, feed-blocked and CPU.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL_UTILIZATION.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TRANSIENT.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL_UTILIZATION.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TRANSIENT.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_MAPPINGS.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_OPEN_FILE_DESCRIPTORS.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MALLOC_ARENA.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_ADDRESS_SPACE.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_FEEDING_BLOCKED.max()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_SETUP, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_READ, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_WRITE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_COMPACT, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_OTHER, EnumSet.of(max, sum, count));
// Transaction log metrics.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_ENTRIES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_REPLAY_TIME.last());
// Document store metrics for the "ready" sub-db.
// NOTE(review): unlike the notready/removed stores on the following lines, no
// MEMORY_USAGE_DEAD_BYTES metric is added for the ready store — confirm intentional.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, 
SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_BLOAT.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ONHOLD_BYTES.average());
// Document store cache metrics (ready/notready sub-dbs).
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_HIT_RATE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_LOOKUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_HIT_RATE.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_LOOKUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate());
// Attribute and index memory usage per sub-db.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ONHOLD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ALLOCATED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_USED_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_DEAD_BYTES.average()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ONHOLD_BYTES.average());
// Query matching metrics, in total and per rank profile, plus feeding commit metrics.
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_SOFT_DOOMED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_SETUP_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED, EnumSet.of(rate, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOMED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOM_FACTOR, EnumSet.of(min, max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_GROUPING_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_DOCS_MATCHED, EnumSet.of(rate, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_LIMITED_QUERIES.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_OPERATIONS, EnumSet.of(max, sum, count, rate)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_LATENCY, EnumSet.of(max, sum, count)); return metrics; }

// Storage node (vds) metrics. (Method body continues on the following lines.)
private static Set<Metric> getStorageMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); 
addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BUCKETS.average()); addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_DOCS.average()); addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BYTES.average()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEVISITORLIFETIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_COMPLETED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_CREATED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEMESSAGESENDTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEPROCESSINGTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WINDOW_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WAITING_THREADS, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_ACTIVE_TOKENS, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEMETADATAREADLATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAREADLATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAWRITELATENCY, EnumSet.of(max, 
sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_RPC_DIRECT_DISPATCHES.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_PERSISTENCE_THREAD_POLLS.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_TIMEOUTS_WAITING_FOR_THROTTLE_TOKEN.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_FAILED.rate()); addMetric(metrics, 
StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_TEST_AND_SET_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_REQUEST_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_FAILED.rate()); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_COUNT.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE, EnumSet.of(max, sum, count)); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_OK.rate()); addMetric(metrics, 
StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUSY.rate()); addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TOTAL.rate()); return metrics; } private static Set<Metric> getDistributorMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_RECHECKING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_IDEALSTATE_DIFF.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOFEWCOPIES.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOMANYCOPIES.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_NOTRUSTED.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_MOVING_OUT.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_OUT.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_COPYING_IN.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKET_REPLICAS_SYNCING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MAX_OBSERVED_TIME_SINCE_LAST_GC_SEC.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_BLOCKED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_THROTTLED.rate()); addMetric(metrics, 
DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_CHANGED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_BLOCKED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_SOURCE_ONLY_COPY_DELETE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_PENDING.average()); addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DOCUMENTS_REMOVED, EnumSet.of(count, rate)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTCONNECTED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTREADY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_WRONGDISTRIBUTOR.rate()); 
addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_SAFE_TIME_NOT_REACHED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_STORAGEFAILURE.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TIMEOUT.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_BUSY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_INCONSISTENT_BUCKET.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TEST_AND_SET_FAILED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_CONCURRENT_MUTATIONS.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_DIVERGING_TIMESTAMP_UPDATES.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, 
DistributorMetrics.VDS_DISTRIBUTOR_GETS_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_LATENCY, EnumSet.of(max, sum, count)); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_OK.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TOTAL.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTREADY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTCONNECTED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_WRONGDISTRIBUTOR.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_SAFE_TIME_NOT_REACHED.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_STORAGEFAILURE.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TIMEOUT.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_BUSY.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_INCONSISTENT_BUCKET.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTFOUND.rate()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average()); addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_BYTESSTORED.average()); addMetric(metrics, DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count()); return metrics; } private static void addMetric(Set<Metric> metrics, String nameWithSuffix) { metrics.add(new Metric(nameWithSuffix)); } private static void addMetric(Set<Metric> metrics, VespaMetrics metric, EnumSet<Suffix> suffixes) { suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." 
+ suffix.suffix()))); } private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) { for (String suffix : aggregateSuffices) { metrics.add(new Metric(metricName + "." + suffix)); } } }
// NOTE(review): stray non-code marker line ("🦄") — an extraction/chunk-separator
// artifact between two unrelated source files; not part of either file's code.
/**
 * Drives a GET visit request against the document/v1 handler and lets the caller
 * assert on the {@code VisitorParameters} the handler builds. The mock access layer
 * invokes {@code paramChecker} and then completes the visit with SUCCESS, so the
 * handler should respond 200 with an empty document list.
 *
 * NOTE(review): the request-URL string literal below is truncated by extraction
 * ("http: ..." with no closing quote) — {@code httpReqParams} was presumably appended
 * to the lost URL; verify against the upstream test source before editing.
 */
private void doTestVisitRequestWithParams(String httpReqParams, Consumer<VisitorParameters> paramChecker) { try (var driver = new RequestHandlerTestDriver(handler)) { access.expect(parameters -> { paramChecker.accept(parameters); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.SUCCESS, "great success"); }); var response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/", "documents": [ ], "documentCount": 0 }""", response.readAll()); assertEquals(200, response.getStatus()); } }
// NOTE(review): the following statement is a stray duplicated fragment of the
// onDone(...) call inside the method above — an extraction artifact, not valid
// class-level Java; it should not exist in the real source file.
parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.SUCCESS, "great success");
// NOTE(review): byte-identical duplicate of doTestVisitRequestWithParams above —
// another extraction artifact; the real file contains this method only once.
private void doTestVisitRequestWithParams(String httpReqParams, Consumer<VisitorParameters> paramChecker) { try (var driver = new RequestHandlerTestDriver(handler)) { access.expect(parameters -> { paramChecker.accept(parameters); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.SUCCESS, "great success"); }); var response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/", "documents": [ ], "documentCount": 0 }""", response.readAll()); assertEquals(200, response.getStatus()); } }
/**
 * Tests for the document/v1 REST handler: cluster/bucket resolution and end-to-end
 * request/response behavior against a mocked document access layer.
 *
 * NOTE(review): many sendRequest URL string literals in this chunk are truncated by
 * extraction ("http: ..." with the URL and closing quote lost), and the class is cut
 * off at the bottom of this view (testResponses continues past it) — treat the code
 * below as a partial, garbled transcript; verify against the upstream test source.
 */
class DocumentV1ApiTest { final AllClustersBucketSpacesConfig bucketConfig = new AllClustersBucketSpacesConfig.Builder() .cluster("content", new AllClustersBucketSpacesConfig.Cluster.Builder() .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder() .bucketSpace(FixedBucketSpaces.defaultSpace()))) .build(); final ClusterListConfig clusterConfig = new ClusterListConfig.Builder() .storage(new ClusterListConfig.Storage.Builder().configid("config-id") .name("content")) .build(); final DocumentOperationExecutorConfig executorConfig = new DocumentOperationExecutorConfig.Builder() .maxThrottled(2) .resendDelayMillis(1 << 30) .build(); final DocumentmanagerConfig docConfig = Deriver.getDocumentManagerConfig("src/test/cfg/music.sd") .ignoreundefinedfields(true).build(); final DocumentTypeManager manager = new DocumentTypeManager(docConfig); final Document doc1 = new Document(manager.getDocumentType("music"), "id:space:music::one"); final Document doc2 = new Document(manager.getDocumentType("music"), "id:space:music:n=1:two"); final Document doc3 = new Document(manager.getDocumentType("music"), "id:space:music:g=a:three");
// Instance initializer: populate test documents (doc3 is intentionally left empty).
{ doc1.setFieldValue("artist", "Tom Waits"); doc1.setFieldValue("embedding", new TensorFieldValue(Tensor.from("tensor(x[3]):[1,2,3]"))); doc2.setFieldValue("artist", "Asa-Chan & Jun-Ray"); doc2.setFieldValue("embedding", new TensorFieldValue(Tensor.from("tensor(x[3]):[4,5,6]"))); }
final Map<String, StorageCluster> clusters = Map.of("content", new StorageCluster("content", Map.of("music", "default")));
// Per-test fixtures, (re)created in setUp().
ManualClock clock; MockDocumentAccess access; MockMetric metric; MetricReceiver metrics; DocumentV1ApiHandler handler;
@Before public void setUp() { clock = new ManualClock(); access = new MockDocumentAccess(docConfig); metric = new MockMetric(); metrics = new MetricReceiver.MockReceiver(); handler = new DocumentV1ApiHandler(clock, Duration.ofMillis(1), metric, metrics, access, docConfig, executorConfig, clusterConfig,
bucketConfig); }
@After public void tearDown() { handler.destroy(); }
// Verifies cluster and bucket-space resolution, including the error messages for
// no clusters, an unknown cluster, and an ambiguous choice between two clusters.
@Test public void testResolveCluster() { assertEquals("content", DocumentV1ApiHandler.resolveCluster(Optional.empty(), clusters).name()); assertEquals("content", DocumentV1ApiHandler.resolveCluster(Optional.of("content"), clusters).name()); try { DocumentV1ApiHandler.resolveCluster(Optional.empty(), Map.of()); fail("Should fail without any clusters"); } catch (IllegalArgumentException e) { assertEquals("Your Vespa deployment has no content clusters, so the document API is not enabled", e.getMessage()); } try { DocumentV1ApiHandler.resolveCluster(Optional.of("blargh"), clusters); fail("Should not find this cluster"); } catch (IllegalArgumentException e) { assertEquals("Your Vespa deployment has no content cluster 'blargh', only 'content'", e.getMessage()); } try { Map<String, StorageCluster> twoClusters = new TreeMap<>(); twoClusters.put("one", new StorageCluster("one", Map.of())); twoClusters.put("two", new StorageCluster("two", Map.of())); DocumentV1ApiHandler.resolveCluster(Optional.empty(), twoClusters); fail("More than one cluster and no document type should fail"); } catch (IllegalArgumentException e) { assertEquals("Please specify one of the content clusters in your Vespa deployment: 'one', 'two'", e.getMessage()); } StorageCluster cluster = DocumentV1ApiHandler.resolveCluster(Optional.of("content"), clusters); assertEquals(FixedBucketSpaces.defaultSpace(), DocumentV1ApiHandler.resolveBucket(cluster, Optional.of("music"), List.of(), Optional.empty())); assertEquals(FixedBucketSpaces.globalSpace(), DocumentV1ApiHandler.resolveBucket(cluster, Optional.empty(), List.of(FixedBucketSpaces.globalSpace()), Optional.of("global"))); }
// Exercises the handler's HTTP surface end-to-end against the mock access layer:
// 404 for unknown paths, visits (with trace, streaming, slicing, timestamps),
// updates/removes with conditions, gets with field sets, and error propagation.
// Cut off at the bottom of this chunk — continues in the next one.
@Test public void testResponses() { RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler); List<AckToken> tokens = List.of(new AckToken(null), new AckToken(null), new AckToken(null)); var response = driver.sendRequest("http: assertSameJson(""" {
"pathId": "/document/v1/not-found", "message": "Nothing at '/document/v1/not-found'. Available paths are: /document/v1/ /document/v1/{namespace}/{documentType}/docid/ /document/v1/{namespace}/{documentType}/group/{group}/ /document/v1/{namespace}/{documentType}/number/{number}/ /document/v1/{namespace}/{documentType}/docid/{*} /document/v1/{namespace}/{documentType}/group/{group}/{*} /document/v1/{namespace}/{documentType}/number/{number}/{*}" }""", response.readAll()); assertEquals("application/json; charset=UTF-8", response.getResponse().headers().getFirst("Content-Type")); assertEquals(404, response.getStatus()); access.expect(tokens); Trace visitorTrace = new Trace(9); visitorTrace.trace(7, "Tracy Chapman", false); visitorTrace.getRoot().addChild(new TraceNode().setStrict(false) .addChild("Fast Car") .addChild("Baby Can I Hold You")); access.visitorTrace = visitorTrace; access.expect(parameters -> { assertEquals("content", parameters.getRoute().toString()); assertEquals("default", parameters.getBucketSpace()); assertEquals(1025, parameters.getMaxTotalHits()); assertEquals(100, ((StaticThrottlePolicy) parameters.getThrottlePolicy()).getMaxPendingCount()); assertEquals("[id]", parameters.getFieldSet()); assertEquals("(all the things)", parameters.getDocumentSelection()); assertTrue(6000 <= parameters.getSessionTimeoutMs()); assertEquals(9, parameters.getTraceLevel()); assertEquals(1_000_000, parameters.getFromTimestamp()); assertEquals(2_000_000, parameters.getToTimestamp()); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0)); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1)); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc3)), tokens.get(2)); VisitorStatistics statistics = new VisitorStatistics(); statistics.setBucketsVisited(1); statistics.setDocumentsVisited(3);
parameters.getControlHandler().onVisitorStatistics(statistics); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.TIMEOUT, "timeout is OK"); }); response = driver.sendRequest("http: "&selection=all%20the%20things&fieldSet=[id]&timeout=6&tracelevel=9&fromTimestamp=1000000&toTimestamp=2000000"); assertSameJson(""" { "pathId": "/document/v1", "documents": [ { "id": "id:space:music::one", "fields": { "artist": "Tom Waits",\s "embedding": { "type": "tensor(x[3])", "values": [1.0,2.0,3.0] }\s } }, { "id": "id:space:music:n=1:two", "fields": { "artist": "Asa-Chan & Jun-Ray",\s "embedding": { "type": "tensor(x[3])", "values": [4.0,5.0,6.0] }\s } }, { "id": "id:space:music:g=a:three", "fields": {} } ], "documentCount": 3, "trace": [ { "message": "Tracy Chapman" }, { "fork": [ { "message": "Fast Car" }, { "message": "Baby Can I Hold You" } ] } ] }""", response.readAll()); assertEquals(200, response.getStatus()); access.visitorTrace = null; access.expect(tokens); access.expect(parameters -> { assertEquals("content", parameters.getRoute().toString()); assertEquals("default", parameters.getBucketSpace()); assertEquals(1025, parameters.getMaxTotalHits()); assertEquals(1, ((StaticThrottlePolicy) parameters.getThrottlePolicy()).getMaxPendingCount()); assertEquals("[id]", parameters.getFieldSet()); assertEquals("(all the things)", parameters.getDocumentSelection()); assertTrue(6000 <= parameters.getTimeoutMs()); assertEquals(4, parameters.getSlices()); assertEquals(1, parameters.getSliceId()); assertEquals(0, parameters.getFromTimestamp()); assertEquals(0, parameters.getToTimestamp()); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0)); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1)); VisitorStatistics statistics = new VisitorStatistics(); statistics.setBucketsVisited(1); statistics.setDocumentsVisited(2);
parameters.getControlHandler().onVisitorStatistics(statistics); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.TIMEOUT, "timeout is OK"); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc3)), tokens.get(2)); }); response = driver.sendRequest("http: "&selection=all%20the%20things&fieldSet=[id]&timeout=6&stream=true&slices=4&sliceId=1"); assertSameJson(""" { "pathId": "/document/v1", "documents": [ { "id": "id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": { "type": "tensor(x[3])", "values": [1.0,2.0,3.0] } } }, { "id": "id:space:music:n=1:two", "fields": { "artist": "Asa-Chan & Jun-Ray", "embedding": { "type": "tensor(x[3])", "values": [4.0,5.0,6.0] } } } ], "documentCount": 2 }""", response.readAll()); assertEquals(200, response.getStatus()); ProgressToken progress = new ProgressToken(); VisitorIterator.createFromExplicitBucketSet(Set.of(new BucketId(1), new BucketId(2)), 8, progress) .update(new BucketId(1), new BucketId(1)); access.expect(parameters -> { assertEquals("(music) and (id.namespace=='space')", parameters.getDocumentSelection()); assertEquals(progress.serializeToString(), parameters.getResumeToken().serializeToString()); throw new IllegalArgumentException("parse failure"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "message": "parse failure" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { assertEquals("(music) and (id.namespace=='space')", parameters.getDocumentSelection()); parameters.getControlHandler().onProgress(progress); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.FAILURE, "failure?"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "documents": [], "documentCount": 0, "message": "failure?"
}""", response.readAll()); assertEquals(200, response.getStatus()); assertNull(response.getResponse().headers().get("X-Vespa-Ignored-Fields")); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "message": "Must specify 'destinationCluster' at '/document/v1/space/music/docid'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { assertEquals("[Content:cluster=content]", parameters.getRemoteDataHandler()); assertEquals("[document]", parameters.fieldSet()); assertEquals(60_000L, parameters.getSessionTimeoutMs()); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.SUCCESS, "We made it!"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "documentCount": 0 }""", response.readAll()); assertEquals(200, response.getStatus()); access.expect(tokens.subList(2, 3)); access.expect(parameters -> { assertEquals("(true) and (music) and (id.namespace=='space')", parameters.getDocumentSelection()); assertEquals("[id]", parameters.fieldSet()); assertEquals(10_000, parameters.getSessionTimeoutMs()); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc3)), tokens.get(2)); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.TIMEOUT, "Won't care"); }); access.session.expect((update, parameters) -> { DocumentUpdate expectedUpdate = new DocumentUpdate(doc3.getDataType(), doc3.getId()); expectedUpdate.addFieldUpdate(FieldUpdate.createAssign(doc3.getField("artist"), new StringFieldValue("Lisa Ekdahl"))); expectedUpdate.setCondition(new TestAndSetCondition("true")); assertEquals(expectedUpdate, update); parameters.responseHandler().get().handleResponse(new UpdateResponse(0, false)); assertEquals(parameters().withRoute("content"), parameters); return new Result(); }); response =
driver.sendRequest("http: """ { "fields": { "artist": { "assign": "Lisa Ekdahl" }, "nonexisting": { "assign": "Ignored" } } }"""); assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "documentCount": 0 }""", response.readAll()); assertEquals(200, response.getStatus()); assertEquals("true", response.getResponse().headers().get("X-Vespa-Ignored-Fields").get(0).toString()); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/group/troupe", "message": "Must specify 'cluster' at '/document/v1/space/music/group/troupe'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/group/troupe", "message": "Must specify 'selection' at '/document/v1/space/music/group/troupe'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(tokens.subList(0, 1)); access.expect(parameters -> { assertEquals("(false) and (music) and (id.namespace=='space')", parameters.getDocumentSelection()); assertEquals("[id]", parameters.fieldSet()); assertEquals(60_000, parameters.getSessionTimeoutMs()); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(0)); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.ABORTED, "Huzzah?"); }); access.session.expect((remove, parameters) -> { DocumentRemove expectedRemove = new DocumentRemove(doc2.getId()); expectedRemove.setCondition(new TestAndSetCondition("false")); assertEquals(expectedRemove, remove); assertEquals(parameters().withRoute("content"), parameters); parameters.responseHandler().get().handleResponse(new DocumentIdResponse(0, doc2.getId(), "boom", Response.Outcome.ERROR)); return new Result(); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId":
"/document/v1/space/music/docid", "documentCount": 0, "message": "boom" }""", response.readAll()); assertEquals(502, response.getStatus()); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/", "message": "Must specify 'selection' at '/document/v1/'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/", "message": "Must specify 'cluster' at '/document/v1/'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { assertEquals("(music) and (id.namespace=='space') and (id.group=='best\\'')", parameters.getDocumentSelection()); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.FAILURE, "error"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/group/best%27", "documents": [], "documentCount": 0, "message": "error" }""", response.readAll()); assertEquals(502, response.getStatus()); access.expect(parameters -> { assertEquals("(music) and (id.namespace=='space') and (id.user==123)", parameters.getDocumentSelection()); VisitorStatistics statistics = new VisitorStatistics(); statistics.setBucketsVisited(1); statistics.setDocumentsVisited(0); parameters.getControlHandler().onVisitorStatistics(statistics); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.ABORTED, "aborted"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/number/123", "documents": [ ], "documentCount": 0 }""", response.readAll()); assertEquals(200, response.getStatus()); access.expect(parameters -> { fail("unreachable"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/", "message": "toTimestamp must be greater than, or equal to,
fromTimestamp" }""", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((id, parameters) -> { assertEquals(doc1.getId(), id); assertEquals(parameters().withRoute("content").withFieldSet("go"), parameters); parameters.responseHandler().get().handleResponse(new DocumentResponse(0, null)); return new Result(); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid/one", "id": "id:space:music::one" }""", response.readAll()); assertEquals(404, response.getStatus()); access.session.expect((id, parameters) -> { assertEquals(doc1.getId(), id); assertEquals(parameters().withFieldSet("music:[document]"), parameters); parameters.responseHandler().get().handleResponse(new DocumentResponse(0, doc1)); return new Result(); }); response = driver.sendRequest("http: String shortJson = """ { "pathId": "/document/v1/space/music/docid/one", "id": "id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": { "type": "tensor(x[3])","values": [1.0, 2.0, 3.0]} } } """; assertEquals(200, response.getStatus()); assertSameJson(shortJson, response.readAll()); response = driver.sendRequest("http: String longJson = """ { "pathId": "/document/v1/space/music/docid/one", "id": "id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": { "type": "tensor(x[3])","cells": [{"address":{"x":"0"},"value":1.0},{"address":{"x":"1"},"value": 2.0},{"address":{"x":"2"},"value": 3.0}]} } } """; assertEquals(200, response.getStatus()); assertSameJson(longJson, response.readAll()); response = driver.sendRequest("http: String shortDirectJson = """ { "pathId": "/document/v1/space/music/docid/one", "id": "id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": [1.0, 2.0, 3.0]} } } """; assertEquals(200, response.getStatus()); assertSameJson(shortDirectJson, response.readAll()); response = driver.sendRequest("http: String longDirectJson = """ { "pathId": "/document/v1/space/music/docid/one", "id":
"id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": [{"address":{"x":"0"},"value":1.0},{"address":{"x":"1"},"value": 2.0},{"address":{"x":"2"},"value": 3.0}] } } """; assertEquals(200, response.getStatus()); assertSameJson(longDirectJson, response.readAll()); access.session.expect((id, parameters) -> { assertEquals(new DocumentId("id:space:music::one/two/three"), id); assertEquals(parameters().withFieldSet("music:[document]"), parameters); parameters.responseHandler().get().handleResponse(new DocumentResponse(0)); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one/two/three\"," + " \"id\": \"id:space:music::one/two/three\"" + "}", response.readAll()); assertEquals(404, response.getStatus()); access.session.expect((__, ___) -> { fail("Should not cause an actual feed operation"); return null; }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"May not specify 'dryRun' at '/document/v1/space/music/number/1/two'\"\n" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((__, ___) -> { fail("Should not cause an actual feed operation"); return null; }); response = driver.sendRequest("http: "NOT JSON, NOT PARSED"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((__, ___) -> { fail("Should not cause an actual feed operation"); return null; }); response = driver.sendRequest("http: "NOT JSON, NOT PARSED"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((__, ___) -> { fail("Should not cause an actual feed operation"); return null; });
response = driver.sendRequest("http: "NOT JSON, NOT PARSED"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((update, parameters) -> { DocumentUpdate expectedUpdate = new DocumentUpdate(doc3.getDataType(), doc3.getId()); expectedUpdate.addFieldUpdate(FieldUpdate.createAssign(doc3.getField("artist"), new StringFieldValue("Lisa Ekdahl"))); expectedUpdate.setCreateIfNonExistent(true); assertEquals(expectedUpdate, update); assertEquals(parameters(), parameters); parameters.responseHandler().get().handleResponse(new UpdateResponse(0, true)); return new Result(); }); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/a/three\"," + " \"id\": \"id:space:music:g=a:three\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((put, parameters) -> { DocumentPut expectedPut = new DocumentPut(doc2); expectedPut.setCondition(new TestAndSetCondition("test it")); expectedPut.setCreateIfNonExistent(true); assertEquals(expectedPut, put); assertEquals(parameters().withTraceLevel(9), parameters); Trace trace = new Trace(9); trace.trace(7, "Tracy Chapman", false); trace.getRoot().addChild(new TraceNode().setStrict(false) .addChild("Fast Car") .addChild("Baby Can I Hold You")); parameters.responseHandler().get().handleResponse(new DocumentResponse(0, doc2, trace)); return new Result(); }); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"," + " \"embedding\": { \"values\": [4.0,5.0,6.0] } " + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"trace\": [" + " {" + " \"message\": \"Tracy Chapman\"" + " }," + " {" + " 
\"fork\": [" + " {" + " \"message\": \"Fast Car\"" + " }," + " {" + " \"message\": \"Baby Can I Hold You\"" + " }" + " ]" + " }" + " ]" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"Could not read document, no document?\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: "{" + " ┻━┻︵ \\(°□°)/ ︵ ┻━┻" + "}"); Inspector responseRoot = SlimeUtils.jsonToSlime(response.readAll()).get(); assertEquals("/document/v1/space/music/number/1/two", responseRoot.field("pathId").asString()); assertTrue(responseRoot.field("message").asString().startsWith("Unexpected character ('┻' (code 9531 / 0x253b)): was expecting double-quote to start field name")); assertEquals(400, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/house/group/a/three\"," + " \"message\": \"Document type house does not exist\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((update, parameters) -> { parameters.responseHandler().get().handleResponse(new UpdateResponse(0, false)); return new Result(); }); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"The Shivers\" }" + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/sonny\"," + " \"id\": \"id:space:music::sonny\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); 
access.session.expect((remove, parameters) -> { DocumentRemove expectedRemove = new DocumentRemove(doc2.getId()); expectedRemove.setCondition(new TestAndSetCondition("false")); assertEquals(expectedRemove, remove); assertEquals(parameters().withRoute("route"), parameters); parameters.responseHandler().get().handleResponse(new DocumentIdResponse(0, doc2.getId())); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"Expected non-empty value for request property 'route'\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"Your Vespa deployment has no content cluster 'throw-me', only 'content'\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((id, parameters) -> { assertFalse(clock.instant().plusSeconds(1000).isAfter(parameters.deadline().get())); parameters.responseHandler().get().handleResponse(new Response(0, "timeout", Response.Outcome.TIMEOUT)); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"timeout\"" + "}", response.readAll()); assertEquals(504, response.getStatus()); access.session.expect((id, parameters) -> { parameters.responseHandler().get().handleResponse(new Response(0, "disk full", 
Response.Outcome.INSUFFICIENT_STORAGE)); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"disk full\"" + "}", response.readAll()); assertEquals(507, response.getStatus()); access.session.expect((id, parameters) -> { parameters.responseHandler().get().handleResponse(new Response(0, "no dice", Response.Outcome.CONDITION_FAILED)); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"no dice\"" + "}", response.readAll()); assertEquals(412, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("https: assertEquals("", response.readAll()); assertEquals(204, response.getStatus()); assertEquals("GET,POST,PUT,DELETE", response.getResponse().headers().getFirst("Allow")); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("https: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"message\": \"'PATCH' not allowed at '/document/v1/space/music/docid/one'. 
Allowed methods are: GET, POST, PUT, DELETE\"" + "}", response.readAll()); assertEquals(405, response.getStatus()); access.session.expect((id, parameters) -> new Result(Result.ResultType.TRANSIENT_ERROR, Result.toError(Result.ResultType.TRANSIENT_ERROR))); var response1 = driver.sendRequest("http: var response2 = driver.sendRequest("http: var response3 = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"Rejecting execution due to overload: 2 requests already enqueued\"" + "}", response3.readAll()); assertEquals(429, response3.getStatus()); access.session.expect((id, parameters) -> new Result(Result.ResultType.FATAL_ERROR, Result.toError(Result.ResultType.FATAL_ERROR))); handler.dispatchEnqueued(); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"[FATAL_ERROR @ localhost]: FATAL_ERROR\"" + "}", response1.readAll()); assertEquals(502, response1.getStatus()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"[FATAL_ERROR @ localhost]: FATAL_ERROR\"" + "}", response2.readAll()); assertEquals(502, response2.getStatus()); AtomicReference<ResponseHandler> handler = new AtomicReference<>(); access.session.expect((id, parameters) -> { handler.set(parameters.responseHandler().get()); return new Result(); }); try { var response4 = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"message\": \"Timeout after 1ms\"" + "}", response4.readAll()); assertEquals(504, response4.getStatus()); } finally { if (handler.get() != null) handler.get().handleResponse(new Response(0)); } assertEquals(3, metric.metrics().get("httpapi_succeeded").get(Map.of()), 0); assertEquals(1, metric.metrics().get("httpapi_condition_not_met").get(Map.of()), 0); assertEquals(1, metric.metrics().get("httpapi_not_found").get(Map.of()), 0); assertEquals(1, 
metric.metrics().get("httpapi_failed").get(Map.of()), 0); driver.close(); }
// Test fixture for the /document/v1 HTTP API handler (DocumentV1ApiHandler).
// NOTE(review): this copy of the file has been mangled by an extraction step that
// stripped everything after "http:" in request URLs (as if "//" started a comment),
// so many string literals later in the file are truncated. Do not assume the
// sendRequest(...) calls below are complete.
class DocumentV1ApiTest {

    // Bucket-space config: the "content" cluster's "music" type lives in the default bucket space.
    final AllClustersBucketSpacesConfig bucketConfig = new AllClustersBucketSpacesConfig.Builder()
            .cluster("content", new AllClustersBucketSpacesConfig.Cluster.Builder()
                    .documentType("music", new AllClustersBucketSpacesConfig.Cluster.DocumentType.Builder()
                            .bucketSpace(FixedBucketSpaces.defaultSpace())))
            .build();

    // Cluster list with a single content cluster named "content".
    final ClusterListConfig clusterConfig = new ClusterListConfig.Builder()
            .storage(new ClusterListConfig.Storage.Builder().configid("config-id")
                    .name("content"))
            .build();

    // Executor config: at most 2 queued (throttled) operations, and a very long
    // resend delay (1 << 30 ms) — presumably so automatic resends never fire
    // during a test run; confirm against DocumentOperationExecutorConfig docs.
    final DocumentOperationExecutorConfig executorConfig = new DocumentOperationExecutorConfig.Builder()
            .maxThrottled(2)
            .resendDelayMillis(1 << 30)
            .build();

    // Document manager config derived from the "music" test schema; unknown fields are ignored.
    final DocumentmanagerConfig docConfig = Deriver.getDocumentManagerConfig("src/test/cfg/music.sd")
            .ignoreundefinedfields(true).build();
    final DocumentTypeManager manager = new DocumentTypeManager(docConfig);

    // Three fixture documents: plain doc id, numeric group (n=1) and string group (g=a).
    final Document doc1 = new Document(manager.getDocumentType("music"), "id:space:music::one");
    final Document doc2 = new Document(manager.getDocumentType("music"), "id:space:music:n=1:two");
    final Document doc3 = new Document(manager.getDocumentType("music"), "id:space:music:g=a:three");
    {
        // Populate doc1 and doc2 with field values; doc3 is left without fields.
        doc1.setFieldValue("artist", "Tom Waits");
        doc1.setFieldValue("embedding", new TensorFieldValue(Tensor.from("tensor(x[3]):[1,2,3]")));
        doc2.setFieldValue("artist", "Asa-Chan & Jun-Ray");
        doc2.setFieldValue("embedding", new TensorFieldValue(Tensor.from("tensor(x[3]):[4,5,6]")));
    }

    // Storage clusters visible to the handler: "content", serving "music" from the
    // "default" bucket space.
    final Map<String, StorageCluster> clusters = Map.of("content",
                                                        new StorageCluster("content",
                                                                           Map.of("music", "default")));

    ManualClock clock;            // manually advanced test clock
    MockDocumentAccess access;    // scripted document-access layer the handler talks to
    MockMetric metric;            // captures metrics emitted by the handler
    MetricReceiver metrics;
    DocumentV1ApiHandler handler; // unit under test

    // Fresh handler and mocks before each test. The Duration.ofMillis(1) argument is
    // presumably a handler timing resolution — confirm against DocumentV1ApiHandler.
    @Before
    public void setUp() {
        clock = new ManualClock();
        access = new MockDocumentAccess(docConfig);
        metric = new MockMetric();
        metrics = new MetricReceiver.MockReceiver();
        handler = new DocumentV1ApiHandler(clock, Duration.ofMillis(1), metric, metrics,
                                           access, docConfig, executorConfig, clusterConfig,
                                           bucketConfig); // continuation of the setUp() constructor call above
    }

    // Destroys the handler after each test so its resources are released.
    @After
    public void tearDown() {
        handler.destroy();
    }

    // Verifies cluster selection (zero, one and multiple content clusters, by name and
    // implicitly) and bucket-space resolution, including the exact user-facing error messages.
    @Test
    public void testResolveCluster() {
        // A single configured cluster is chosen both implicitly and by name.
        assertEquals("content", DocumentV1ApiHandler.resolveCluster(Optional.empty(), clusters).name());
        assertEquals("content", DocumentV1ApiHandler.resolveCluster(Optional.of("content"), clusters).name());
        try {
            DocumentV1ApiHandler.resolveCluster(Optional.empty(), Map.of());
            fail("Should fail without any clusters");
        }
        catch (IllegalArgumentException e) {
            assertEquals("Your Vespa deployment has no content clusters, so the document API is not enabled",
                         e.getMessage());
        }
        try {
            DocumentV1ApiHandler.resolveCluster(Optional.of("blargh"), clusters);
            fail("Should not find this cluster");
        }
        catch (IllegalArgumentException e) {
            assertEquals("Your Vespa deployment has no content cluster 'blargh', only 'content'",
                         e.getMessage());
        }
        try {
            // Two clusters and no explicit choice is ambiguous and must be rejected.
            Map<String, StorageCluster> twoClusters = new TreeMap<>();
            twoClusters.put("one", new StorageCluster("one", Map.of()));
            twoClusters.put("two", new StorageCluster("two", Map.of()));
            DocumentV1ApiHandler.resolveCluster(Optional.empty(), twoClusters);
            fail("More than one cluster and no document type should fail");
        }
        catch (IllegalArgumentException e) {
            assertEquals("Please specify one of the content clusters in your Vespa deployment: 'one', 'two'",
                         e.getMessage());
        }
        StorageCluster cluster = DocumentV1ApiHandler.resolveCluster(Optional.of("content"), clusters);
        // A known document type resolves to its configured bucket space; an explicitly
        // requested space is used when no document type is given.
        assertEquals(FixedBucketSpaces.defaultSpace(),
                     DocumentV1ApiHandler.resolveBucket(cluster, Optional.of("music"),
                                                        List.of(), Optional.empty()));
        assertEquals(FixedBucketSpaces.globalSpace(),
                     DocumentV1ApiHandler.resolveBucket(cluster, Optional.empty(),
                                                        List.of(FixedBucketSpaces.globalSpace()), Optional.of("global")));
    }

    // End-to-end request/response tests for the handler. NOTE(review): the request
    // URLs in this method were truncated at "http:" by the faulty extraction step
    // described at the top of the class — the remainder of each sendRequest(...) call
    // is missing from this copy, and the fragment below is preserved verbatim.
    @Test
    public void testResponses() {
        RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler);
        List<AckToken> tokens = List.of(new AckToken(null), new AckToken(null), new AckToken(null));
        var response = driver.sendRequest("http: assertSameJson(""" { 
"pathId": "/document/v1/not-found", "message": "Nothing at '/document/v1/not-found'. Available paths are: /document/v1/ /document/v1/{namespace}/{documentType}/docid/ /document/v1/{namespace}/{documentType}/group/{group}/ /document/v1/{namespace}/{documentType}/number/{number}/ /document/v1/{namespace}/{documentType}/docid/{*} /document/v1/{namespace}/{documentType}/group/{group}/{*} /document/v1/{namespace}/{documentType}/number/{number}/{*}" }""", response.readAll()); assertEquals("application/json; charset=UTF-8", response.getResponse().headers().getFirst("Content-Type")); assertEquals(404, response.getStatus()); access.expect(tokens); Trace visitorTrace = new Trace(9); visitorTrace.trace(7, "Tracy Chapman", false); visitorTrace.getRoot().addChild(new TraceNode().setStrict(false) .addChild("Fast Car") .addChild("Baby Can I Hold You")); access.visitorTrace = visitorTrace; access.expect(parameters -> { assertEquals("content", parameters.getRoute().toString()); assertEquals("default", parameters.getBucketSpace()); assertEquals(1025, parameters.getMaxTotalHits()); assertEquals(100, ((StaticThrottlePolicy) parameters.getThrottlePolicy()).getMaxPendingCount()); assertEquals("[id]", parameters.getFieldSet()); assertEquals("(all the things)", parameters.getDocumentSelection()); assertTrue(6000 <= parameters.getSessionTimeoutMs()); assertEquals(9, parameters.getTraceLevel()); assertEquals(1_000_000, parameters.getFromTimestamp()); assertEquals(2_000_000, parameters.getToTimestamp()); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0)); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1)); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc3)), tokens.get(2)); VisitorStatistics statistics = new VisitorStatistics(); statistics.setBucketsVisited(1); statistics.setDocumentsVisited(3); 
parameters.getControlHandler().onVisitorStatistics(statistics); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.TIMEOUT, "timeout is OK"); }); response = driver.sendRequest("http: "&selection=all%20the%20things&fieldSet=[id]&timeout=6&tracelevel=9&fromTimestamp=1000000&toTimestamp=2000000"); assertSameJson(""" { "pathId": "/document/v1", "documents": [ { "id": "id:space:music::one", "fields": { "artist": "Tom Waits",\s "embedding": { "type": "tensor(x[3])", "values": [1.0,2.0,3.0] }\s } }, { "id": "id:space:music:n=1:two", "fields": { "artist": "Asa-Chan & Jun-Ray",\s "embedding": { "type": "tensor(x[3])", "values": [4.0,5.0,6.0] }\s } }, { "id": "id:space:music:g=a:three", "fields": {} } ], "documentCount": 3, "trace": [ { "message": "Tracy Chapman" }, { "fork": [ { "message": "Fast Car" }, { "message": "Baby Can I Hold You" } ] } ] }""", response.readAll()); assertEquals(200, response.getStatus()); access.visitorTrace = null; access.expect(tokens); access.expect(parameters -> { assertEquals("content", parameters.getRoute().toString()); assertEquals("default", parameters.getBucketSpace()); assertEquals(1025, parameters.getMaxTotalHits()); assertEquals(1, ((StaticThrottlePolicy) parameters.getThrottlePolicy()).getMaxPendingCount()); assertEquals("[id]", parameters.getFieldSet()); assertEquals("(all the things)", parameters.getDocumentSelection()); assertTrue(6000 <= parameters.getTimeoutMs()); assertEquals(4, parameters.getSlices()); assertEquals(1, parameters.getSliceId()); assertEquals(0, parameters.getFromTimestamp()); assertEquals(0, parameters.getToTimestamp()); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0)); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1)); VisitorStatistics statistics = new VisitorStatistics(); statistics.setBucketsVisited(1); statistics.setDocumentsVisited(2); 
parameters.getControlHandler().onVisitorStatistics(statistics); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.TIMEOUT, "timeout is OK"); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc3)), tokens.get(2)); }); response = driver.sendRequest("http: "&selection=all%20the%20things&fieldSet=[id]&timeout=6&stream=true&slices=4&sliceId=1"); assertSameJson(""" { "pathId": "/document/v1", "documents": [ { "id": "id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": { "type": "tensor(x[3])", "values": [1.0,2.0,3.0] } } }, { "id": "id:space:music:n=1:two", "fields": { "artist": "Asa-Chan & Jun-Ray", "embedding": { "type": "tensor(x[3])", "values": [4.0,5.0,6.0] } } } ], "documentCount": 2 }""", response.readAll()); assertEquals(200, response.getStatus()); ProgressToken progress = new ProgressToken(); VisitorIterator.createFromExplicitBucketSet(Set.of(new BucketId(1), new BucketId(2)), 8, progress) .update(new BucketId(1), new BucketId(1)); access.expect(parameters -> { assertEquals("(music) and (id.namespace=='space')", parameters.getDocumentSelection()); assertEquals(progress.serializeToString(), parameters.getResumeToken().serializeToString()); throw new IllegalArgumentException("parse failure"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "message": "parse failure" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { assertEquals("(music) and (id.namespace=='space')", parameters.getDocumentSelection()); parameters.getControlHandler().onProgress(progress); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.FAILURE, "failure?"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "documents": [], "documentCount": 0, "message": "failure?" 
}""", response.readAll()); assertEquals(200, response.getStatus()); assertNull(response.getResponse().headers().get("X-Vespa-Ignored-Fields")); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "message": "Must specify 'destinationCluster' at '/document/v1/space/music/docid'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { assertEquals("[Content:cluster=content]", parameters.getRemoteDataHandler()); assertEquals("[document]", parameters.fieldSet()); assertEquals(60_000L, parameters.getSessionTimeoutMs()); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.SUCCESS, "We made it!"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "documentCount": 0 }""", response.readAll()); assertEquals(200, response.getStatus()); access.expect(tokens.subList(2, 3)); access.expect(parameters -> { assertEquals("(true) and (music) and (id.namespace=='space')", parameters.getDocumentSelection()); assertEquals("[id]", parameters.fieldSet()); assertEquals(10_000, parameters.getSessionTimeoutMs()); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc3)), tokens.get(2)); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.TIMEOUT, "Won't care"); }); access.session.expect((update, parameters) -> { DocumentUpdate expectedUpdate = new DocumentUpdate(doc3.getDataType(), doc3.getId()); expectedUpdate.addFieldUpdate(FieldUpdate.createAssign(doc3.getField("artist"), new StringFieldValue("Lisa Ekdahl"))); expectedUpdate.setCondition(new TestAndSetCondition("true")); assertEquals(expectedUpdate, update); parameters.responseHandler().get().handleResponse(new UpdateResponse(0, false)); assertEquals(parameters().withRoute("content"), parameters); return new Result(); }); response = 
driver.sendRequest("http: """ { "fields": { "artist": { "assign": "Lisa Ekdahl" }, "nonexisting": { "assign": "Ignored" } } }"""); assertSameJson(""" { "pathId": "/document/v1/space/music/docid", "documentCount": 0 }""", response.readAll()); assertEquals(200, response.getStatus()); assertEquals("true", response.getResponse().headers().get("X-Vespa-Ignored-Fields").get(0).toString()); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/group/troupe", "message": "Must specify 'cluster' at '/document/v1/space/music/group/troupe'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/group/troupe", "message": "Must specify 'selection' at '/document/v1/space/music/group/troupe'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(tokens.subList(0, 1)); access.expect(parameters -> { assertEquals("(false) and (music) and (id.namespace=='space')", parameters.getDocumentSelection()); assertEquals("[id]", parameters.fieldSet()); assertEquals(60_000, parameters.getSessionTimeoutMs()); parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(0)); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.ABORTED, "Huzzah?"); }); access.session.expect((remove, parameters) -> { DocumentRemove expectedRemove = new DocumentRemove(doc2.getId()); expectedRemove.setCondition(new TestAndSetCondition("false")); assertEquals(expectedRemove, remove); assertEquals(parameters().withRoute("content"), parameters); parameters.responseHandler().get().handleResponse(new DocumentIdResponse(0, doc2.getId(), "boom", Response.Outcome.ERROR)); return new Result(); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": 
"/document/v1/space/music/docid", "documentCount": 0, "message": "boom" }""", response.readAll()); assertEquals(502, response.getStatus()); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/", "message": "Must specify 'selection' at '/document/v1/'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { fail("Not supposed to run"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/", "message": "Must specify 'cluster' at '/document/v1/'" }""", response.readAll()); assertEquals(400, response.getStatus()); access.expect(parameters -> { assertEquals("(music) and (id.namespace=='space') and (id.group=='best\\'')", parameters.getDocumentSelection()); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.FAILURE, "error"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/group/best%27", "documents": [], "documentCount": 0, "message": "error" }""", response.readAll()); assertEquals(502, response.getStatus()); access.expect(parameters -> { assertEquals("(music) and (id.namespace=='space') and (id.user==123)", parameters.getDocumentSelection()); VisitorStatistics statistics = new VisitorStatistics(); statistics.setBucketsVisited(1); statistics.setDocumentsVisited(0); parameters.getControlHandler().onVisitorStatistics(statistics); parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.ABORTED, "aborted"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/number/123", "documents": [ ], "documentCount": 0 }""", response.readAll()); assertEquals(200, response.getStatus()); access.expect(parameters -> { fail("unreachable"); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/", "message": "toTimestamp must be greater than, or equal to, 
fromTimestamp" }""", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((id, parameters) -> { assertEquals(doc1.getId(), id); assertEquals(parameters().withRoute("content").withFieldSet("go"), parameters); parameters.responseHandler().get().handleResponse(new DocumentResponse(0, null)); return new Result(); }); response = driver.sendRequest("http: assertSameJson(""" { "pathId": "/document/v1/space/music/docid/one", "id": "id:space:music::one" }""", response.readAll()); assertEquals(404, response.getStatus()); access.session.expect((id, parameters) -> { assertEquals(doc1.getId(), id); assertEquals(parameters().withFieldSet("music:[document]"), parameters); parameters.responseHandler().get().handleResponse(new DocumentResponse(0, doc1)); return new Result(); }); response = driver.sendRequest("http: String shortJson = """ { "pathId": "/document/v1/space/music/docid/one", "id": "id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": { "type": "tensor(x[3])","values": [1.0, 2.0, 3.0]} } } """; assertEquals(200, response.getStatus()); assertSameJson(shortJson, response.readAll()); response = driver.sendRequest("http: String longJson = """ { "pathId": "/document/v1/space/music/docid/one", "id": "id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": { "type": "tensor(x[3])","cells": [{"address":{"x":"0"},"value":1.0},{"address":{"x":"1"},"value": 2.0},{"address":{"x":"2"},"value": 3.0}]} } } """; assertEquals(200, response.getStatus()); assertSameJson(longJson, response.readAll()); response = driver.sendRequest("http: String shortDirectJson = """ { "pathId": "/document/v1/space/music/docid/one", "id": "id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": [1.0, 2.0, 3.0]} } } """; assertEquals(200, response.getStatus()); assertSameJson(shortDirectJson, response.readAll()); response = driver.sendRequest("http: String longDirectJson = """ { "pathId": "/document/v1/space/music/docid/one", "id": 
"id:space:music::one", "fields": { "artist": "Tom Waits", "embedding": [{"address":{"x":"0"},"value":1.0},{"address":{"x":"1"},"value": 2.0},{"address":{"x":"2"},"value": 3.0}] } } """; assertEquals(200, response.getStatus()); assertSameJson(longDirectJson, response.readAll()); access.session.expect((id, parameters) -> { assertEquals(new DocumentId("id:space:music::one/two/three"), id); assertEquals(parameters().withFieldSet("music:[document]"), parameters); parameters.responseHandler().get().handleResponse(new DocumentResponse(0)); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one/two/three\"," + " \"id\": \"id:space:music::one/two/three\"" + "}", response.readAll()); assertEquals(404, response.getStatus()); access.session.expect((__, ___) -> { fail("Should not cause an actual feed operation"); return null; }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"May not specify 'dryRun' at '/document/v1/space/music/number/1/two'\"\n" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((__, ___) -> { fail("Should not cause an actual feed operation"); return null; }); response = driver.sendRequest("http: "NOT JSON, NOT PARSED"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((__, ___) -> { fail("Should not cause an actual feed operation"); return null; }); response = driver.sendRequest("http: "NOT JSON, NOT PARSED"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((__, ___) -> { fail("Should not cause an actual feed operation"); return null; }); 
response = driver.sendRequest("http: "NOT JSON, NOT PARSED"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((update, parameters) -> { DocumentUpdate expectedUpdate = new DocumentUpdate(doc3.getDataType(), doc3.getId()); expectedUpdate.addFieldUpdate(FieldUpdate.createAssign(doc3.getField("artist"), new StringFieldValue("Lisa Ekdahl"))); expectedUpdate.setCreateIfNonExistent(true); assertEquals(expectedUpdate, update); assertEquals(parameters(), parameters); parameters.responseHandler().get().handleResponse(new UpdateResponse(0, true)); return new Result(); }); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/group/a/three\"," + " \"id\": \"id:space:music:g=a:three\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((put, parameters) -> { DocumentPut expectedPut = new DocumentPut(doc2); expectedPut.setCondition(new TestAndSetCondition("test it")); expectedPut.setCreateIfNonExistent(true); assertEquals(expectedPut, put); assertEquals(parameters().withTraceLevel(9), parameters); Trace trace = new Trace(9); trace.trace(7, "Tracy Chapman", false); trace.getRoot().addChild(new TraceNode().setStrict(false) .addChild("Fast Car") .addChild("Baby Can I Hold You")); parameters.responseHandler().get().handleResponse(new DocumentResponse(0, doc2, trace)); return new Result(); }); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": \"Asa-Chan & Jun-Ray\"," + " \"embedding\": { \"values\": [4.0,5.0,6.0] } " + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"trace\": [" + " {" + " \"message\": \"Tracy Chapman\"" + " }," + " {" + " 
\"fork\": [" + " {" + " \"message\": \"Fast Car\"" + " }," + " {" + " \"message\": \"Baby Can I Hold You\"" + " }" + " ]" + " }" + " ]" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"Could not read document, no document?\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: "{" + " ┻━┻︵ \\(°□°)/ ︵ ┻━┻" + "}"); Inspector responseRoot = SlimeUtils.jsonToSlime(response.readAll()).get(); assertEquals("/document/v1/space/music/number/1/two", responseRoot.field("pathId").asString()); assertTrue(responseRoot.field("message").asString().startsWith("Unexpected character ('┻' (code 9531 / 0x253b)): was expecting double-quote to start field name")); assertEquals(400, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"Lisa Ekdahl\" }" + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/house/group/a/three\"," + " \"message\": \"Document type house does not exist\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((update, parameters) -> { parameters.responseHandler().get().handleResponse(new UpdateResponse(0, false)); return new Result(); }); response = driver.sendRequest("http: "{" + " \"fields\": {" + " \"artist\": { \"assign\": \"The Shivers\" }" + " }" + "}"); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/sonny\"," + " \"id\": \"id:space:music::sonny\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); 
access.session.expect((remove, parameters) -> { DocumentRemove expectedRemove = new DocumentRemove(doc2.getId()); expectedRemove.setCondition(new TestAndSetCondition("false")); assertEquals(expectedRemove, remove); assertEquals(parameters().withRoute("route"), parameters); parameters.responseHandler().get().handleResponse(new DocumentIdResponse(0, doc2.getId())); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"" + "}", response.readAll()); assertEquals(200, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"Expected non-empty value for request property 'route'\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"Your Vespa deployment has no content cluster 'throw-me', only 'content'\"" + "}", response.readAll()); assertEquals(400, response.getStatus()); access.session.expect((id, parameters) -> { assertFalse(clock.instant().plusSeconds(1000).isAfter(parameters.deadline().get())); parameters.responseHandler().get().handleResponse(new Response(0, "timeout", Response.Outcome.TIMEOUT)); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"timeout\"" + "}", response.readAll()); assertEquals(504, response.getStatus()); access.session.expect((id, parameters) -> { parameters.responseHandler().get().handleResponse(new Response(0, "disk full", 
Response.Outcome.INSUFFICIENT_STORAGE)); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"disk full\"" + "}", response.readAll()); assertEquals(507, response.getStatus()); access.session.expect((id, parameters) -> { parameters.responseHandler().get().handleResponse(new Response(0, "no dice", Response.Outcome.CONDITION_FAILED)); return new Result(); }); response = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"id\": \"id:space:music:n=1:two\"," + " \"message\": \"no dice\"" + "}", response.readAll()); assertEquals(412, response.getStatus()); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("https: assertEquals("", response.readAll()); assertEquals(204, response.getStatus()); assertEquals("GET,POST,PUT,DELETE", response.getResponse().headers().getFirst("Allow")); access.session.expect((__, ___) -> { throw new AssertionError("Not supposed to happen"); }); response = driver.sendRequest("https: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"message\": \"'PATCH' not allowed at '/document/v1/space/music/docid/one'. 
Allowed methods are: GET, POST, PUT, DELETE\"" + "}", response.readAll()); assertEquals(405, response.getStatus()); access.session.expect((id, parameters) -> new Result(Result.ResultType.TRANSIENT_ERROR, Result.toError(Result.ResultType.TRANSIENT_ERROR))); var response1 = driver.sendRequest("http: var response2 = driver.sendRequest("http: var response3 = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"Rejecting execution due to overload: 2 requests already enqueued\"" + "}", response3.readAll()); assertEquals(429, response3.getStatus()); access.session.expect((id, parameters) -> new Result(Result.ResultType.FATAL_ERROR, Result.toError(Result.ResultType.FATAL_ERROR))); handler.dispatchEnqueued(); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"[FATAL_ERROR @ localhost]: FATAL_ERROR\"" + "}", response1.readAll()); assertEquals(502, response1.getStatus()); assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/number/1/two\"," + " \"message\": \"[FATAL_ERROR @ localhost]: FATAL_ERROR\"" + "}", response2.readAll()); assertEquals(502, response2.getStatus()); AtomicReference<ResponseHandler> handler = new AtomicReference<>(); access.session.expect((id, parameters) -> { handler.set(parameters.responseHandler().get()); return new Result(); }); try { var response4 = driver.sendRequest("http: assertSameJson("{" + " \"pathId\": \"/document/v1/space/music/docid/one\"," + " \"message\": \"Timeout after 1ms\"" + "}", response4.readAll()); assertEquals(504, response4.getStatus()); } finally { if (handler.get() != null) handler.get().handleResponse(new Response(0)); } assertEquals(3, metric.metrics().get("httpapi_succeeded").get(Map.of()), 0); assertEquals(1, metric.metrics().get("httpapi_condition_not_met").get(Map.of()), 0); assertEquals(1, metric.metrics().get("httpapi_not_found").get(Map.of()), 0); assertEquals(1, 
metric.metrics().get("httpapi_failed").get(Map.of()), 0); driver.close(); }
Yikes — good catch, the receiver and argument of compatibleWith were swapped. Thanks!
/**
 * Returns whether this host's resources are compatible with the node resources requested
 * for this allocation. Note the call direction: we ask whether the host's resources are
 * compatible with what was requested, not the other way around — the previous version had
 * the receiver and argument of compatibleWith reversed.
 */
private boolean fitsPerfectly(Node host) {
    return host.resources().compatibleWith(requestedNodes.resources().get());
}
return requestedNodes.resources().get().compatibleWith(host.resources());
/** Returns whether this host's resources are compatible with the resources requested for new nodes. */
private boolean fitsPerfectly(Node host) {
    var hostResources = host.resources();
    var requested = requestedNodes.resources().get();
    return hostResources.compatibleWith(requested);
}
/**
 * Builds a prioritized list of {@link NodeCandidate}s for allocating nodes of a cluster
 * to an application. Candidates are gathered from several sources (existing application
 * nodes, surplus nodes, ready nodes, and new child slots on existing hosts) and then
 * sorted into most-to-least preferred order.
 */
class NodePrioritizer {

    private final List<NodeCandidate> candidates = new ArrayList<>();
    private final LockedNodeList allNodes;
    private final HostCapacity capacity;
    private final NodeSpec requestedNodes;
    private final ApplicationId application;
    private final ClusterSpec clusterSpec;
    private final NameResolver nameResolver;
    private final Nodes nodes;
    private final boolean dynamicProvisioning;
    // Whether candidates on spare hosts may be used for this allocation
    private final boolean canAllocateToSpareHosts;
    // Whether the number of groups in the cluster changes with this allocation
    private final boolean topologyChange;
    private final int currentClusterSize;
    private final Set<Node> spareHosts;
    private final boolean enclave;

    public NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
                           int wantedGroups, boolean dynamicProvisioning, NameResolver nameResolver, Nodes nodes,
                           HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean enclave) {
        this.allNodes = allNodes;
        this.capacity = new HostCapacity(this.allNodes, hostResourcesCalculator);
        this.requestedNodes = nodeSpec;
        this.clusterSpec = clusterSpec;
        this.application = application;
        this.dynamicProvisioning = dynamicProvisioning;
        // Spare hosts are found differently in dynamically provisioned zones than in static zones
        this.spareHosts = dynamicProvisioning ?
                          capacity.findSpareHostsInDynamicallyProvisionedZones(this.allNodes.asList()) :
                          capacity.findSpareHosts(this.allNodes.asList(), spareCount);
        this.nameResolver = nameResolver;
        this.nodes = nodes;
        this.enclave = enclave;

        NodeList nodesInCluster = this.allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
        NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();
        // Count distinct group indices among active nodes to detect a change in the number of groups
        long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream()
                                                     .flatMap(node -> node.allocation()
                                                                          .flatMap(alloc -> alloc.membership().cluster().group().map(ClusterSpec.Group::index))
                                                                          .stream())
                                                     .distinct()
                                                     .count();
        this.topologyChange = currentGroups != wantedGroups;

        // Number of active, non-retired nodes currently in the group being allocated to
        this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream()
                                                                .map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group()))
                                                                .filter(clusterSpec.group()::equals)
                                                                .count();

        // Spare hosts may be used in dynamically provisioned zones, or when replacing failed nodes
        this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, clusterSpec.group());
    }

    /** Collects all node candidates for this application and returns them in the most-to-least preferred order */
    public List<NodeCandidate> collect(List<Node> surplusActiveNodes) {
        addApplicationNodes();
        addSurplusNodes(surplusActiveNodes);
        addReadyNodes();
        addCandidatesOnExistingHosts();
        return prioritize();
    }

    /** Returns the list of nodes sorted by {@link NodeCandidate}'s natural order */
    private List<NodeCandidate> prioritize() {
        // Group candidates by the network switch of their host ("" when the switch is unknown)
        Map<String, List<NodeCandidate>> candidatesBySwitch = this.candidates.stream()
                .collect(Collectors.groupingBy(candidate -> candidate.parent.orElseGet(candidate::toNode)
                                                                            .switchHostname()
                                                                            .orElse("")));
        List<NodeCandidate> nodes = new ArrayList<>(this.candidates.size());
        for (var clusterSwitch : candidatesBySwitch.keySet()) {
            List<NodeCandidate> switchCandidates = candidatesBySwitch.get(clusterSwitch);
            if (clusterSwitch.isEmpty()) {
                // Unknown switch: no exclusivity information to apply
                nodes.addAll(switchCandidates);
            } else {
                // Only the best candidate on each switch keeps its exclusive-switch status
                Collections.sort(switchCandidates);
                NodeCandidate bestNode = switchCandidates.get(0);
                nodes.add(bestNode);
                for (var node : switchCandidates.subList(1, switchCandidates.size())) {
                    nodes.add(node.withExclusiveSwitch(false));
                }
            }
        }
        Collections.sort(nodes);
        return nodes;
    }

    /**
     * Add nodes that have been previously reserved to the same application from
     * an earlier downsizing of a cluster
     */
    private void addSurplusNodes(List<Node> surplusNodes) {
        for (Node node : surplusNodes) {
            NodeCandidate candidate = candidateFrom(node, true);
            if (!candidate.violatesSpares || canAllocateToSpareHosts) {
                candidates.add(candidate);
            }
        }
    }

    /** Add a node on each host with enough capacity for the requested flavor */
    private void addCandidatesOnExistingHosts() {
        if (requestedNodes.resources().isEmpty()) return; // no concrete resources requested: nothing to place

        for (Node host : allNodes) {
            // Guard clauses: skip hosts that cannot, or should not, receive a new node for this application
            if ( ! nodes.canAllocateTenantNodeTo(host, dynamicProvisioning)) continue;
            if (nodes.suspended(host)) continue; // skip suspended hosts
            if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
            if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
            if (host.exclusiveToApplicationId().isPresent() && ! fitsPerfectly(host)) continue;
            if ( ! host.exclusiveToClusterType().map(clusterSpec.type()::equals).orElse(true)) continue;
            if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
            if ( ! capacity.hasCapacity(host, requestedNodes.resources().get())) continue;
            // At most one node of this cluster per host
            if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
            candidates.add(NodeCandidate.createNewChild(requestedNodes.resources().get(),
                                                        capacity.availableCapacityOf(host),
                                                        host,
                                                        spareHosts.contains(host),
                                                        allNodes,
                                                        nameResolver,
                                                        !enclave));
        }
    }

    /** Add existing nodes allocated to the application */
    private void addApplicationNodes() {
        EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved);
        allNodes.stream()
                .filter(node -> node.type() == requestedNodes.type())
                .filter(node -> legalStates.contains(node.state()))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(application))
                .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                .filter(node -> node.state() == Node.State.active || canStillAllocate(node))
                .map(node -> candidateFrom(node, false))
                .forEach(candidates::add);
    }

    /** Add nodes already provisioned, but not allocated to any application */
    private void addReadyNodes() {
        allNodes.stream()
                .filter(node -> node.type() == requestedNodes.type())
                .filter(node -> node.state() == Node.State.ready)
                .map(node -> candidateFrom(node, false))
                .filter(n -> !n.violatesSpares || canAllocateToSpareHosts)
                .forEach(candidates::add);
    }

    /** Create a candidate from given pre-existing node */
    private NodeCandidate candidateFrom(Node node, boolean isSurplus) {
        Optional<Node> optionalParent = allNodes.parentOf(node);
        if (optionalParent.isPresent()) {
            Node parent = optionalParent.get();
            return NodeCandidate.createChild(node,
                                             capacity.availableCapacityOf(parent),
                                             parent,
                                             spareHosts.contains(parent),
                                             isSurplus,
                                             false,
                                             // Resizing is only considered on hosts not exclusive to one application
                                             parent.exclusiveToApplicationId().isEmpty()
                                             && requestedNodes.canResize(node.resources(),
                                                                         capacity.unusedCapacityOf(parent),
                                                                         clusterSpec.type(),
                                                                         topologyChange,
                                                                         currentClusterSize));
        } else {
            return NodeCandidate.createStandalone(node, isSurplus, false);
        }
    }

    /** Returns whether we are allocating to replace a failed node */
    private boolean isReplacement(NodeList nodesInCluster, Optional<ClusterSpec.Group> group) {
        NodeList nodesInGroup = group.map(ClusterSpec.Group::index)
                                     .map(nodesInCluster::group)
                                     .orElse(nodesInCluster);
        int failedNodesInGroup = nodesInGroup.failing().size() + nodesInGroup.state(Node.State.failed).size();
        if (failedNodesInGroup == 0) return false;
        // Replacement if the requested count cannot be fulfilled by the non-failed nodes alone
        return ! requestedNodes.fulfilledBy(nodesInGroup.size() - failedNodesInGroup);
    }

    /**
     * We may regret that a non-active node is allocated to a host and not offer it to the application
     * now, e.g if we want to retire the host.
     *
     * @return true if we still want to allocate the given node to its parent
     */
    private boolean canStillAllocate(Node node) {
        if (node.type() != NodeType.tenant || node.parentHostname().isEmpty()) return true;
        Optional<Node> parent = allNodes.parentOf(node);
        return parent.isPresent() && nodes.canAllocateTenantNodeTo(parent.get(), dynamicProvisioning);
    }

}
/**
 * Builds a prioritized list of {@link NodeCandidate}s for allocating nodes of a cluster
 * to an application. Candidates are gathered from several sources (existing application
 * nodes, surplus nodes, ready nodes, and new child slots on existing hosts) and then
 * sorted into most-to-least preferred order.
 */
class NodePrioritizer {

    private final List<NodeCandidate> candidates = new ArrayList<>();
    private final LockedNodeList allNodes;
    private final HostCapacity capacity;
    private final NodeSpec requestedNodes;
    private final ApplicationId application;
    private final ClusterSpec clusterSpec;
    private final NameResolver nameResolver;
    private final Nodes nodes;
    private final boolean dynamicProvisioning;
    // Whether candidates on spare hosts may be used for this allocation
    private final boolean canAllocateToSpareHosts;
    // Whether the number of groups in the cluster changes with this allocation
    private final boolean topologyChange;
    private final int currentClusterSize;
    private final Set<Node> spareHosts;
    private final boolean enclave;

    public NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
                           int wantedGroups, boolean dynamicProvisioning, NameResolver nameResolver, Nodes nodes,
                           HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean enclave) {
        this.allNodes = allNodes;
        this.capacity = new HostCapacity(this.allNodes, hostResourcesCalculator);
        this.requestedNodes = nodeSpec;
        this.clusterSpec = clusterSpec;
        this.application = application;
        this.dynamicProvisioning = dynamicProvisioning;
        // Spare hosts are found differently in dynamically provisioned zones than in static zones
        this.spareHosts = dynamicProvisioning ?
                          capacity.findSpareHostsInDynamicallyProvisionedZones(this.allNodes.asList()) :
                          capacity.findSpareHosts(this.allNodes.asList(), spareCount);
        this.nameResolver = nameResolver;
        this.nodes = nodes;
        this.enclave = enclave;

        NodeList nodesInCluster = this.allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
        NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();
        // Count distinct group indices among active nodes to detect a change in the number of groups
        long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream()
                                                     .flatMap(node -> node.allocation()
                                                                          .flatMap(alloc -> alloc.membership().cluster().group().map(ClusterSpec.Group::index))
                                                                          .stream())
                                                     .distinct()
                                                     .count();
        this.topologyChange = currentGroups != wantedGroups;

        // Number of active, non-retired nodes currently in the group being allocated to
        this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream()
                                                                .map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group()))
                                                                .filter(clusterSpec.group()::equals)
                                                                .count();

        // Spare hosts may be used in dynamically provisioned zones, or when replacing failed nodes
        this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, clusterSpec.group());
    }

    /** Collects all node candidates for this application and returns them in the most-to-least preferred order */
    public List<NodeCandidate> collect(List<Node> surplusActiveNodes) {
        addApplicationNodes();
        addSurplusNodes(surplusActiveNodes);
        addReadyNodes();
        addCandidatesOnExistingHosts();
        return prioritize();
    }

    /** Returns the list of nodes sorted by {@link NodeCandidate}'s natural order */
    private List<NodeCandidate> prioritize() {
        // Group candidates by the network switch of their host ("" when the switch is unknown)
        Map<String, List<NodeCandidate>> candidatesBySwitch = this.candidates.stream()
                .collect(Collectors.groupingBy(candidate -> candidate.parent.orElseGet(candidate::toNode)
                                                                            .switchHostname()
                                                                            .orElse("")));
        List<NodeCandidate> nodes = new ArrayList<>(this.candidates.size());
        for (var clusterSwitch : candidatesBySwitch.keySet()) {
            List<NodeCandidate> switchCandidates = candidatesBySwitch.get(clusterSwitch);
            if (clusterSwitch.isEmpty()) {
                // Unknown switch: no exclusivity information to apply
                nodes.addAll(switchCandidates);
            } else {
                // Only the best candidate on each switch keeps its exclusive-switch status
                Collections.sort(switchCandidates);
                NodeCandidate bestNode = switchCandidates.get(0);
                nodes.add(bestNode);
                for (var node : switchCandidates.subList(1, switchCandidates.size())) {
                    nodes.add(node.withExclusiveSwitch(false));
                }
            }
        }
        Collections.sort(nodes);
        return nodes;
    }

    /**
     * Add nodes that have been previously reserved to the same application from
     * an earlier downsizing of a cluster
     */
    private void addSurplusNodes(List<Node> surplusNodes) {
        for (Node node : surplusNodes) {
            NodeCandidate candidate = candidateFrom(node, true);
            if (!candidate.violatesSpares || canAllocateToSpareHosts) {
                candidates.add(candidate);
            }
        }
    }

    /** Add a node on each host with enough capacity for the requested flavor */
    private void addCandidatesOnExistingHosts() {
        if (requestedNodes.resources().isEmpty()) return; // no concrete resources requested: nothing to place

        for (Node host : allNodes) {
            // Guard clauses: skip hosts that cannot, or should not, receive a new node for this application
            if ( ! nodes.canAllocateTenantNodeTo(host, dynamicProvisioning)) continue;
            if (nodes.suspended(host)) continue; // skip suspended hosts
            if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
            if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
            if (host.exclusiveToApplicationId().isPresent() && ! fitsPerfectly(host)) continue;
            if ( ! host.exclusiveToClusterType().map(clusterSpec.type()::equals).orElse(true)) continue;
            if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
            if ( ! capacity.hasCapacity(host, requestedNodes.resources().get())) continue;
            // At most one node of this cluster per host
            if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
            candidates.add(NodeCandidate.createNewChild(requestedNodes.resources().get(),
                                                        capacity.availableCapacityOf(host),
                                                        host,
                                                        spareHosts.contains(host),
                                                        allNodes,
                                                        nameResolver,
                                                        !enclave));
        }
    }

    /** Add existing nodes allocated to the application */
    private void addApplicationNodes() {
        EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved);
        allNodes.stream()
                .filter(node -> node.type() == requestedNodes.type())
                .filter(node -> legalStates.contains(node.state()))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(application))
                .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                .filter(node -> node.state() == Node.State.active || canStillAllocate(node))
                .map(node -> candidateFrom(node, false))
                .forEach(candidates::add);
    }

    /** Add nodes already provisioned, but not allocated to any application */
    private void addReadyNodes() {
        allNodes.stream()
                .filter(node -> node.type() == requestedNodes.type())
                .filter(node -> node.state() == Node.State.ready)
                .map(node -> candidateFrom(node, false))
                .filter(n -> !n.violatesSpares || canAllocateToSpareHosts)
                .forEach(candidates::add);
    }

    /** Create a candidate from given pre-existing node */
    private NodeCandidate candidateFrom(Node node, boolean isSurplus) {
        Optional<Node> optionalParent = allNodes.parentOf(node);
        if (optionalParent.isPresent()) {
            Node parent = optionalParent.get();
            return NodeCandidate.createChild(node,
                                             capacity.availableCapacityOf(parent),
                                             parent,
                                             spareHosts.contains(parent),
                                             isSurplus,
                                             false,
                                             // Resizing is only considered on hosts not exclusive to one application
                                             parent.exclusiveToApplicationId().isEmpty()
                                             && requestedNodes.canResize(node.resources(),
                                                                         capacity.unusedCapacityOf(parent),
                                                                         clusterSpec.type(),
                                                                         topologyChange,
                                                                         currentClusterSize));
        } else {
            return NodeCandidate.createStandalone(node, isSurplus, false);
        }
    }

    /** Returns whether we are allocating to replace a failed node */
    private boolean isReplacement(NodeList nodesInCluster, Optional<ClusterSpec.Group> group) {
        NodeList nodesInGroup = group.map(ClusterSpec.Group::index)
                                     .map(nodesInCluster::group)
                                     .orElse(nodesInCluster);
        int failedNodesInGroup = nodesInGroup.failing().size() + nodesInGroup.state(Node.State.failed).size();
        if (failedNodesInGroup == 0) return false;
        // Replacement if the requested count cannot be fulfilled by the non-failed nodes alone
        return ! requestedNodes.fulfilledBy(nodesInGroup.size() - failedNodesInGroup);
    }

    /**
     * We may regret that a non-active node is allocated to a host and not offer it to the application
     * now, e.g if we want to retire the host.
     *
     * @return true if we still want to allocate the given node to its parent
     */
    private boolean canStillAllocate(Node node) {
        if (node.type() != NodeType.tenant || node.parentHostname().isEmpty()) return true;
        Optional<Node> parent = allNodes.parentOf(node);
        return parent.isPresent() && nodes.canAllocateTenantNodeTo(parent.get(), dynamicProvisioning);
    }

}
Hmm, perhaps this can be simpler: instead of building a Query by hand, warmup could just issue the request through handle() with a short explicit timeout — that also exercises the same code path as real queries.
/**
 * Runs a single warmup query on construction so the first real request does not pay
 * one-time initialization costs.
 *
 * Issues the query through the regular {@link #handle} entry point instead of building a
 * {@link Query} manually: the previous version duplicated query construction (query profile
 * lookup, embedders, zone/schema info) and resolved the search chain itself, so it could
 * drift from the production request path — and it set no timeout. The explicit 2s timeout
 * bounds startup cost. Any failure is non-fatal and only logged.
 */
private void warmup() {
    try {
        handle(HttpRequest.createTestRequest("/search/?timeout=2s&yql=select+*+from+sources+*+where+true+limit+1;",
                                             com.yahoo.jdisc.http.HttpRequest.Method.GET));
    } catch (RuntimeException e) {
        log.log(Level.WARNING, "Error warming up search handler", e);
    }
}
Query query = new Query.Builder().setRequest(request)
/**
 * Runs a single warmup query on construction so the first real request does not pay
 * one-time initialization costs. Goes through the regular {@link #handle} entry point,
 * with an explicit 2s timeout bounding startup cost. Failures are non-fatal and only logged.
 */
private void warmup() {
    try {
        handle(HttpRequest.createTestRequest("/search/?timeout=2s&yql=select+*+from+sources+*+where+true+limit+1;",
                                             com.yahoo.jdisc.http.HttpRequest.Method.GET));
    } catch (RuntimeException e) {
        log.log(Level.WARNING, "Error warming up search handler", e);
    }
}
class SearchHandler extends LoggingRequestHandler { private static final Logger log = Logger.getLogger(SearchHandler.class.getName()); private final AtomicInteger requestsInFlight = new AtomicInteger(0); private final int maxThreads; private static final CompoundName DETAILED_TIMING_LOGGING = CompoundName.from("trace.timingDetails"); private static final CompoundName FORCE_TIMESTAMPS = CompoundName.from("trace.timestamps"); /** Event name for number of connections to the search subsystem */ private static final String SEARCH_CONNECTIONS = "search_connections"; static final String RENDER_LATENCY_METRIC = ContainerMetrics.JDISC_RENDER_LATENCY.baseName(); static final String MIME_DIMENSION = "mime"; static final String RENDERER_DIMENSION = "renderer"; private static final String JSON_CONTENT_TYPE = "application/json"; public static final String defaultSearchChainName = "default"; private static final String fallbackSearchChain = "vespa"; private final CompiledQueryProfileRegistry queryProfileRegistry; /** If present, responses from this will set the HTTP response header with this key to the host name of this */ private final Optional<String> hostResponseHeaderKey; private final String selfHostname = HostName.getLocalhost(); private final Map<String, Embedder> embedders; private final ExecutionFactory executionFactory; private final AtomicLong numRequestsLeftToTrace; private final ZoneInfo zoneInfo; private final static RequestHandlerSpec REQUEST_HANDLER_SPEC = RequestHandlerSpec.builder() .withAclMapping(SearchHandler.aclRequestMapper()).build(); @Inject public SearchHandler(Metric metric, ContainerThreadPool threadpool, CompiledQueryProfileRegistry queryProfileRegistry, ContainerHttpConfig config, ComponentRegistry<Embedder> embedders, ExecutionFactory executionFactory, ZoneInfo zoneInfo) { this(metric, threadpool.executor(), queryProfileRegistry, embedders, executionFactory, config.numQueriesToTraceOnDebugAfterConstruction(), 
config.hostResponseHeaderKey().equals("") ? Optional.empty() : Optional.of(config.hostResponseHeaderKey()), zoneInfo); } private SearchHandler(Metric metric, Executor executor, CompiledQueryProfileRegistry queryProfileRegistry, ComponentRegistry<Embedder> embedders, ExecutionFactory executionFactory, long numQueriesToTraceOnDebugAfterStartup, Optional<String> hostResponseHeaderKey, ZoneInfo zoneInfo) { super(executor, metric, true); log.log(Level.FINE, () -> "SearchHandler.init " + System.identityHashCode(this)); this.queryProfileRegistry = queryProfileRegistry; this.embedders = toMap(embedders); this.executionFactory = executionFactory; this.maxThreads = examineExecutor(executor); this.hostResponseHeaderKey = hostResponseHeaderKey; this.numRequestsLeftToTrace = new AtomicLong(numQueriesToTraceOnDebugAfterStartup); metric.set(SEARCH_CONNECTIONS, 0.0d, null); this.zoneInfo = zoneInfo; warmup(); } Metric metric() { return metric; } private static int examineExecutor(Executor executor) { if (executor instanceof ThreadPoolExecutor) { return ((ThreadPoolExecutor) executor).getMaximumPoolSize(); } return Integer.MAX_VALUE; } @Override public final HttpResponse handle(com.yahoo.container.jdisc.HttpRequest request) { requestsInFlight.incrementAndGet(); try { try { return handleBody(request); } catch (IllegalInputException e) { return illegalQueryResponse(request, e); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed handling " + request, e); return internalServerErrorResponse(request, e); } } finally { requestsInFlight.decrementAndGet(); } } @Override public Optional<Request.RequestType> getRequestType() { return Optional.of(Request.RequestType.READ); } static int getHttpResponseStatus(com.yahoo.container.jdisc.HttpRequest httpRequest, Result result) { boolean benchmarkOutput = VespaHeaders.benchmarkOutput(httpRequest); if (benchmarkOutput) { return VespaHeaders.getEagerErrorStatus(result.hits().getError(), 
SearchResponse.getErrorIterator(result.hits().getErrorHit())); } else { return VespaHeaders.getStatus(SearchResponse.isSuccess(result), result.hits().getError(), SearchResponse.getErrorIterator(result.hits().getErrorHit())); } } private HttpResponse errorResponse(HttpRequest request, ErrorMessage errorMessage) { Query query = new Query(); Result result = new Result(query, errorMessage); Renderer<Result> renderer = getRendererCopy(ComponentSpecification.fromString(request.getProperty("format"))); return new HttpSearchResponse(getHttpResponseStatus(request, result), result, query, renderer); } private HttpResponse illegalQueryResponse(HttpRequest request, RuntimeException e) { return errorResponse(request, ErrorMessage.createIllegalQuery(Exceptions.toMessageString(e))); } private HttpResponse internalServerErrorResponse(HttpRequest request, RuntimeException e) { return errorResponse(request, ErrorMessage.createInternalServerError(Exceptions.toMessageString(e))); } private HttpSearchResponse handleBody(HttpRequest request) { Map<String, String> requestMap = requestMapFromRequest(request); String queryProfileName = requestMap.getOrDefault("queryProfile", null); CompiledQueryProfile queryProfile = queryProfileRegistry.findQueryProfile(queryProfileName); Query query = new Query.Builder().setRequest(request) .setRequestMap(requestMap) .setQueryProfile(queryProfile) .setEmbedders(embedders) .setZoneInfo(zoneInfo) .setSchemaInfo(executionFactory.schemaInfo()) .build(); boolean benchmarking = VespaHeaders.benchmarkOutput(request); boolean benchmarkCoverage = VespaHeaders.benchmarkCoverage(benchmarking, request.getJDiscRequest().headers()); if (benchmarking && ! 
request.hasProperty(SoftTimeout.enableProperty.toString())) query.properties().set(SoftTimeout.enableProperty, false); String invalidReason = query.validate(); Chain<Searcher> searchChain = null; String searchChainName = null; if (invalidReason == null) { Tuple2<String, Chain<Searcher>> nameAndChain = resolveChain(query.properties().getString(Query.SEARCH_CHAIN)); searchChainName = nameAndChain.first; searchChain = nameAndChain.second; } Result result; if (invalidReason != null) { result = new Result(query, ErrorMessage.createIllegalQuery(invalidReason)); } else if (queryProfile == null && queryProfileName != null) { result = new Result(query, ErrorMessage.createIllegalQuery("Could not resolve query profile '" + queryProfileName + "'")); } else if (searchChain == null) { result = new Result(query, ErrorMessage.createInvalidQueryParameter("No search chain named '" + searchChainName + "' was found")); } else { String pathAndQuery = UriTools.rawRequest(request.getUri()); result = search(pathAndQuery, query, searchChain); } Renderer<Result> renderer = toRendererCopy(query.getPresentation().getRenderer()); HttpSearchResponse response = new HttpSearchResponse(getHttpResponseStatus(request, result), result, query, renderer, extractTraceNode(query), metric); response.setRequestType(Request.RequestType.READ); hostResponseHeaderKey.ifPresent(key -> response.headers().add(key, selfHostname)); if (benchmarking) VespaHeaders.benchmarkOutput(response.headers(), benchmarkCoverage, response.getTiming(), response.getHitCounts(), getErrors(result), response.getCoverage()); return response; } private static TraceNode extractTraceNode(Query query) { if (log.isLoggable(Level.FINE)) { QueryContext queryContext = query.getContext(false); if (queryContext != null) { Execution.Trace trace = queryContext.getTrace(); if (trace != null) { return trace.traceNode(); } } } return null; } private static int getErrors(Result result) { return result.hits().getErrorHit() == null ? 
0 : 1; } private Renderer<Result> toRendererCopy(ComponentSpecification format) { return perRenderingCopy(executionFactory.rendererRegistry().getRenderer(format)); } private Tuple2<String, Chain<Searcher>> resolveChain(String explicitChainName) { String chainName = explicitChainName; if (chainName == null) { chainName = defaultSearchChainName; } Chain<Searcher> searchChain = executionFactory.searchChainRegistry().getChain(chainName); if (searchChain == null && explicitChainName == null) { chainName = fallbackSearchChain; searchChain = executionFactory.searchChainRegistry().getChain(chainName); } return new Tuple2<>(chainName, searchChain); } /** Used from container SDK, for internal use only */ public Result searchAndFill(Query query, Chain<? extends Searcher> searchChain) { Result errorResult = validateQuery(query); if (errorResult != null) return errorResult; Renderer<Result> renderer = executionFactory.rendererRegistry().getRenderer(query.getPresentation().getRenderer()); if (query.getPresentation().getSummary() == null && renderer instanceof com.yahoo.search.rendering.Renderer) query.getPresentation().setSummary(((com.yahoo.search.rendering.Renderer) renderer).getDefaultSummaryClass()); Execution execution = executionFactory.newExecution(searchChain); query.getModel().setExecution(execution); if (log.isLoggable(Level.FINE) && (numRequestsLeftToTrace.getAndDecrement() > 0)) { query.setTraceLevel(Math.max(1, query.getTraceLevel())); execution.trace().setForceTimestamps(true); } else { execution.trace().setForceTimestamps(query.properties().getBoolean(FORCE_TIMESTAMPS, false)); } if (query.properties().getBoolean(DETAILED_TIMING_LOGGING, false)) { execution.context().setDetailedDiagnostics(true); } Result result = execution.search(query); ensureQuerySet(result, query); execution.fill(result, result.getQuery().getPresentation().getSummary()); traceExecutionTimes(query, result); traceVespaVersion(query); traceRequestAttributes(query); return result; } private void 
traceRequestAttributes(Query query) { int miminumTraceLevel = 7; if (query.getTraceLevel() >= 7) { query.trace("Request attributes: " + query.getHttpRequest().context(), miminumTraceLevel); } } /** For internal use only */ public Renderer<Result> getRendererCopy(ComponentSpecification spec) { Renderer<Result> renderer = executionFactory.rendererRegistry().getRenderer(spec); return perRenderingCopy(renderer); } private Renderer<Result> perRenderingCopy(Renderer<Result> renderer) { Renderer<Result> copy = renderer.clone(); copy.init(); return copy; } private void ensureQuerySet(Result result, Query fallbackQuery) { Query query = result.getQuery(); if (query == null) { result.setQuery(fallbackQuery); } } private Result search(String request, Query query, Chain<Searcher> searchChain) { if (query.getTraceLevel() >= 2) { query.trace("Invoking " + searchChain, false, 2); } connectionStatistics(); try { return searchAndFill(query, searchChain); } catch (ParseException e) { ErrorMessage error = ErrorMessage.createIllegalQuery("Could not parse query [" + request + "]: " + Exceptions.toMessageString(e)); log.log(Level.FINE, error::getDetailedMessage); return new Result(query, error); } catch (IllegalInputException e) { ErrorMessage error = ErrorMessage.createBadRequest("Invalid request [" + request + "]: " + Exceptions.toMessageString(e)); log.log(Level.FINE, error::getDetailedMessage); return new Result(query, error); } catch (Exception e) { log(request, query, e); return new Result(query, ErrorMessage.createUnspecifiedError("Failed: " + Exceptions.toMessageString(e), e)); } catch (LinkageError | StackOverflowError e) { ErrorMessage error = ErrorMessage.createErrorInPluginSearcher("Error executing " + searchChain + "]: " + Exceptions.toMessageString(e), e); log(request, query, e); return new Result(query, error); } } private void connectionStatistics() { if (maxThreads <= 3) return; int connections = requestsInFlight.intValue(); metric.set(SEARCH_CONNECTIONS, connections, 
null); long maxThreadsAsLong = maxThreads; long connectionsAsLong = connections; if (connectionsAsLong < maxThreadsAsLong * 9L / 10L) { } else if (connectionsAsLong == maxThreadsAsLong * 9L / 10L) { log.log(Level.WARNING, threadConsumptionMessage(connections, maxThreads, "90")); } else if (connectionsAsLong == maxThreadsAsLong * 95L / 100L) { log.log(Level.WARNING, threadConsumptionMessage(connections, maxThreads, "95")); } else if (connectionsAsLong == maxThreadsAsLong) { log.log(Level.WARNING, threadConsumptionMessage(connections, maxThreads, "100")); } } private String threadConsumptionMessage(int connections, int maxThreads, String percentage) { return percentage + "% of possible search connections (" + connections + " of maximum " + maxThreads + ") currently active."; } private void log(String request, Query query, Throwable e) { if (e.getStackTrace().length == 0) { log.log(Level.SEVERE, "Failed executing " + query.toDetailString() + " [" + request + "], received exception with no context", e); } else { log.log(Level.SEVERE, "Failed executing " + query.toDetailString() + " [" + request + "]", e); } } private Result validateQuery(Query query) { DefaultProperties.requireNotPresentIn(query.getHttpRequest().propertyMap()); int maxHits = query.properties().getInteger(DefaultProperties.MAX_HITS); int maxOffset = query.properties().getInteger(DefaultProperties.MAX_OFFSET); if (query.getHits() > maxHits) { return new Result(query, ErrorMessage.createIllegalQuery(query.getHits() + " hits requested, configured limit: " + maxHits + ". See https: } else if (query.getOffset() > maxOffset) { return new Result(query, ErrorMessage.createIllegalQuery("Offset of " + query.getOffset() + " requested, configured limit: " + maxOffset + ". 
See https: } return null; } private void traceExecutionTimes(Query query, Result result) { if (query.getTraceLevel() < 3) return; ElapsedTime elapsedTime = result.getElapsedTime(); long now = System.currentTimeMillis(); if (elapsedTime.firstFill() != 0) { query.trace("Query time " + query + ": " + (elapsedTime.firstFill() - elapsedTime.first()) + " ms", false, 3); query.trace("Summary fetch time " + query + ": " + (now - elapsedTime.firstFill()) + " ms", false, 3); } else { query.trace("Total search time " + query + ": " + (now - elapsedTime.first()) + " ms", false, 3); } } private void traceVespaVersion(Query query) { query.trace("Vespa version: " + Vtag.currentVersion, false, 4); } public SearchChainRegistry getSearchChainRegistry() { return executionFactory.searchChainRegistry(); } static private String getMediaType(HttpRequest request) { String header = request.getHeader(com.yahoo.jdisc.http.HttpHeaders.Names.CONTENT_TYPE); if (header == null) { return ""; } int semi = header.indexOf(';'); if (semi != -1) { header = header.substring(0, semi); } return com.yahoo.text.Lowercase.toLowerCase(header.trim()); } /** Add properties POSTed as a JSON payload, if any, to the request map */ private Map<String, String> requestMapFromRequest(HttpRequest request) { if (request.getMethod() != com.yahoo.jdisc.http.HttpRequest.Method.POST || ! 
JSON_CONTENT_TYPE.equals(getMediaType(request))) return request.propertyMap(); Map<String, String> requestMap = new Json2SingleLevelMap(request.getData()).parse(); requestMap.putAll(request.propertyMap()); if (requestMap.containsKey("yql") && (requestMap.containsKey("select.where") || requestMap.containsKey("select.grouping")) ) throw new IllegalInputException("Illegal query: Query contains both yql and select parameter"); if (requestMap.containsKey("query") && (requestMap.containsKey("select.where") || requestMap.containsKey("select.grouping")) ) throw new IllegalInputException("Illegal query: Query contains both query and select parameter"); return requestMap; } @Deprecated public void createRequestMapping(Inspector inspector, Map<String, String> map, String parent) { try { new Json2SingleLevelMap(new ByteArrayInputStream(inspector.toString().getBytes(StandardCharsets.UTF_8))).parse(map, parent); } catch (IOException e) { throw new RuntimeException("Failed creating request mapping for parent '" + parent + "'", e); } } @Override public RequestHandlerSpec requestHandlerSpec() { return REQUEST_HANDLER_SPEC; } private static AclMapping aclRequestMapper() { return HttpMethodAclMapping.standard() .override(com.yahoo.jdisc.http.HttpRequest.Method.POST, AclMapping.Action.READ) .build(); } private Map<String, Embedder> toMap(ComponentRegistry<Embedder> embedders) { var map = embedders.allComponentsById().entrySet().stream() .collect(Collectors.toMap(e -> e.getKey().stringValue(), Map.Entry::getValue)); if (map.size() > 1) { map.remove(DefaultEmbedderProvider.class.getName()); } return Collections.unmodifiableMap(map); } }
class SearchHandler extends LoggingRequestHandler { private static final Logger log = Logger.getLogger(SearchHandler.class.getName()); private final AtomicInteger requestsInFlight = new AtomicInteger(0); private final int maxThreads; private static final CompoundName DETAILED_TIMING_LOGGING = CompoundName.from("trace.timingDetails"); private static final CompoundName FORCE_TIMESTAMPS = CompoundName.from("trace.timestamps"); /** Event name for number of connections to the search subsystem */ private static final String SEARCH_CONNECTIONS = "search_connections"; static final String RENDER_LATENCY_METRIC = ContainerMetrics.JDISC_RENDER_LATENCY.baseName(); static final String MIME_DIMENSION = "mime"; static final String RENDERER_DIMENSION = "renderer"; private static final String JSON_CONTENT_TYPE = "application/json"; public static final String defaultSearchChainName = "default"; private static final String fallbackSearchChain = "vespa"; private final CompiledQueryProfileRegistry queryProfileRegistry; /** If present, responses from this will set the HTTP response header with this key to the host name of this */ private final Optional<String> hostResponseHeaderKey; private final String selfHostname = HostName.getLocalhost(); private final Map<String, Embedder> embedders; private final ExecutionFactory executionFactory; private final AtomicLong numRequestsLeftToTrace; private final ZoneInfo zoneInfo; private final static RequestHandlerSpec REQUEST_HANDLER_SPEC = RequestHandlerSpec.builder() .withAclMapping(SearchHandler.aclRequestMapper()).build(); @Inject public SearchHandler(Metric metric, ContainerThreadPool threadpool, CompiledQueryProfileRegistry queryProfileRegistry, ContainerHttpConfig config, ComponentRegistry<Embedder> embedders, ExecutionFactory executionFactory, ZoneInfo zoneInfo) { this(metric, threadpool.executor(), queryProfileRegistry, embedders, executionFactory, config.numQueriesToTraceOnDebugAfterConstruction(), 
config.hostResponseHeaderKey().equals("") ? Optional.empty() : Optional.of(config.hostResponseHeaderKey()), zoneInfo); } private SearchHandler(Metric metric, Executor executor, CompiledQueryProfileRegistry queryProfileRegistry, ComponentRegistry<Embedder> embedders, ExecutionFactory executionFactory, long numQueriesToTraceOnDebugAfterStartup, Optional<String> hostResponseHeaderKey, ZoneInfo zoneInfo) { super(executor, metric, true); log.log(Level.FINE, () -> "SearchHandler.init " + System.identityHashCode(this)); this.queryProfileRegistry = queryProfileRegistry; this.embedders = toMap(embedders); this.executionFactory = executionFactory; this.maxThreads = examineExecutor(executor); this.hostResponseHeaderKey = hostResponseHeaderKey; this.numRequestsLeftToTrace = new AtomicLong(numQueriesToTraceOnDebugAfterStartup); metric.set(SEARCH_CONNECTIONS, 0.0d, null); this.zoneInfo = zoneInfo; warmup(); } Metric metric() { return metric; } private static int examineExecutor(Executor executor) { if (executor instanceof ThreadPoolExecutor) { return ((ThreadPoolExecutor) executor).getMaximumPoolSize(); } return Integer.MAX_VALUE; } @Override public final HttpResponse handle(com.yahoo.container.jdisc.HttpRequest request) { requestsInFlight.incrementAndGet(); try { try { return handleBody(request); } catch (IllegalInputException e) { return illegalQueryResponse(request, e); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed handling " + request, e); return internalServerErrorResponse(request, e); } } finally { requestsInFlight.decrementAndGet(); } } @Override public Optional<Request.RequestType> getRequestType() { return Optional.of(Request.RequestType.READ); } static int getHttpResponseStatus(com.yahoo.container.jdisc.HttpRequest httpRequest, Result result) { boolean benchmarkOutput = VespaHeaders.benchmarkOutput(httpRequest); if (benchmarkOutput) { return VespaHeaders.getEagerErrorStatus(result.hits().getError(), 
SearchResponse.getErrorIterator(result.hits().getErrorHit())); } else { return VespaHeaders.getStatus(SearchResponse.isSuccess(result), result.hits().getError(), SearchResponse.getErrorIterator(result.hits().getErrorHit())); } } private HttpResponse errorResponse(HttpRequest request, ErrorMessage errorMessage) { Query query = new Query(); Result result = new Result(query, errorMessage); Renderer<Result> renderer = getRendererCopy(ComponentSpecification.fromString(request.getProperty("format"))); return new HttpSearchResponse(getHttpResponseStatus(request, result), result, query, renderer); } private HttpResponse illegalQueryResponse(HttpRequest request, RuntimeException e) { return errorResponse(request, ErrorMessage.createIllegalQuery(Exceptions.toMessageString(e))); } private HttpResponse internalServerErrorResponse(HttpRequest request, RuntimeException e) { return errorResponse(request, ErrorMessage.createInternalServerError(Exceptions.toMessageString(e))); } private HttpSearchResponse handleBody(HttpRequest request) { Map<String, String> requestMap = requestMapFromRequest(request); String queryProfileName = requestMap.getOrDefault("queryProfile", null); CompiledQueryProfile queryProfile = queryProfileRegistry.findQueryProfile(queryProfileName); Query query = new Query.Builder().setRequest(request) .setRequestMap(requestMap) .setQueryProfile(queryProfile) .setEmbedders(embedders) .setZoneInfo(zoneInfo) .setSchemaInfo(executionFactory.schemaInfo()) .build(); boolean benchmarking = VespaHeaders.benchmarkOutput(request); boolean benchmarkCoverage = VespaHeaders.benchmarkCoverage(benchmarking, request.getJDiscRequest().headers()); if (benchmarking && ! 
request.hasProperty(SoftTimeout.enableProperty.toString())) query.properties().set(SoftTimeout.enableProperty, false); String invalidReason = query.validate(); Chain<Searcher> searchChain = null; String searchChainName = null; if (invalidReason == null) { Tuple2<String, Chain<Searcher>> nameAndChain = resolveChain(query.properties().getString(Query.SEARCH_CHAIN)); searchChainName = nameAndChain.first; searchChain = nameAndChain.second; } Result result; if (invalidReason != null) { result = new Result(query, ErrorMessage.createIllegalQuery(invalidReason)); } else if (queryProfile == null && queryProfileName != null) { result = new Result(query, ErrorMessage.createIllegalQuery("Could not resolve query profile '" + queryProfileName + "'")); } else if (searchChain == null) { result = new Result(query, ErrorMessage.createInvalidQueryParameter("No search chain named '" + searchChainName + "' was found")); } else { String pathAndQuery = UriTools.rawRequest(request.getUri()); result = search(pathAndQuery, query, searchChain); } Renderer<Result> renderer = toRendererCopy(query.getPresentation().getRenderer()); HttpSearchResponse response = new HttpSearchResponse(getHttpResponseStatus(request, result), result, query, renderer, extractTraceNode(query), metric); response.setRequestType(Request.RequestType.READ); hostResponseHeaderKey.ifPresent(key -> response.headers().add(key, selfHostname)); if (benchmarking) VespaHeaders.benchmarkOutput(response.headers(), benchmarkCoverage, response.getTiming(), response.getHitCounts(), getErrors(result), response.getCoverage()); return response; } private static TraceNode extractTraceNode(Query query) { if (log.isLoggable(Level.FINE)) { QueryContext queryContext = query.getContext(false); if (queryContext != null) { Execution.Trace trace = queryContext.getTrace(); if (trace != null) { return trace.traceNode(); } } } return null; } private static int getErrors(Result result) { return result.hits().getErrorHit() == null ? 
0 : 1; } private Renderer<Result> toRendererCopy(ComponentSpecification format) { return perRenderingCopy(executionFactory.rendererRegistry().getRenderer(format)); } private Tuple2<String, Chain<Searcher>> resolveChain(String explicitChainName) { String chainName = explicitChainName; if (chainName == null) { chainName = defaultSearchChainName; } Chain<Searcher> searchChain = executionFactory.searchChainRegistry().getChain(chainName); if (searchChain == null && explicitChainName == null) { chainName = fallbackSearchChain; searchChain = executionFactory.searchChainRegistry().getChain(chainName); } return new Tuple2<>(chainName, searchChain); } /** Used from container SDK, for internal use only */ public Result searchAndFill(Query query, Chain<? extends Searcher> searchChain) { Result errorResult = validateQuery(query); if (errorResult != null) return errorResult; Renderer<Result> renderer = executionFactory.rendererRegistry().getRenderer(query.getPresentation().getRenderer()); if (query.getPresentation().getSummary() == null && renderer instanceof com.yahoo.search.rendering.Renderer) query.getPresentation().setSummary(((com.yahoo.search.rendering.Renderer) renderer).getDefaultSummaryClass()); Execution execution = executionFactory.newExecution(searchChain); query.getModel().setExecution(execution); if (log.isLoggable(Level.FINE) && (numRequestsLeftToTrace.getAndDecrement() > 0)) { query.setTraceLevel(Math.max(1, query.getTraceLevel())); execution.trace().setForceTimestamps(true); } else { execution.trace().setForceTimestamps(query.properties().getBoolean(FORCE_TIMESTAMPS, false)); } if (query.properties().getBoolean(DETAILED_TIMING_LOGGING, false)) { execution.context().setDetailedDiagnostics(true); } Result result = execution.search(query); ensureQuerySet(result, query); execution.fill(result, result.getQuery().getPresentation().getSummary()); traceExecutionTimes(query, result); traceVespaVersion(query); traceRequestAttributes(query); return result; } private void 
traceRequestAttributes(Query query) { int miminumTraceLevel = 7; if (query.getTraceLevel() >= 7) { query.trace("Request attributes: " + query.getHttpRequest().context(), miminumTraceLevel); } } /** For internal use only */ public Renderer<Result> getRendererCopy(ComponentSpecification spec) { Renderer<Result> renderer = executionFactory.rendererRegistry().getRenderer(spec); return perRenderingCopy(renderer); } private Renderer<Result> perRenderingCopy(Renderer<Result> renderer) { Renderer<Result> copy = renderer.clone(); copy.init(); return copy; } private void ensureQuerySet(Result result, Query fallbackQuery) { Query query = result.getQuery(); if (query == null) { result.setQuery(fallbackQuery); } } private Result search(String request, Query query, Chain<Searcher> searchChain) { if (query.getTraceLevel() >= 2) { query.trace("Invoking " + searchChain, false, 2); } connectionStatistics(); try { return searchAndFill(query, searchChain); } catch (ParseException e) { ErrorMessage error = ErrorMessage.createIllegalQuery("Could not parse query [" + request + "]: " + Exceptions.toMessageString(e)); log.log(Level.FINE, error::getDetailedMessage); return new Result(query, error); } catch (IllegalInputException e) { ErrorMessage error = ErrorMessage.createBadRequest("Invalid request [" + request + "]: " + Exceptions.toMessageString(e)); log.log(Level.FINE, error::getDetailedMessage); return new Result(query, error); } catch (Exception e) { log(request, query, e); return new Result(query, ErrorMessage.createUnspecifiedError("Failed: " + Exceptions.toMessageString(e), e)); } catch (LinkageError | StackOverflowError e) { ErrorMessage error = ErrorMessage.createErrorInPluginSearcher("Error executing " + searchChain + "]: " + Exceptions.toMessageString(e), e); log(request, query, e); return new Result(query, error); } } private void connectionStatistics() { if (maxThreads <= 3) return; int connections = requestsInFlight.intValue(); metric.set(SEARCH_CONNECTIONS, connections, 
null); long maxThreadsAsLong = maxThreads; long connectionsAsLong = connections; if (connectionsAsLong < maxThreadsAsLong * 9L / 10L) { } else if (connectionsAsLong == maxThreadsAsLong * 9L / 10L) { log.log(Level.WARNING, threadConsumptionMessage(connections, maxThreads, "90")); } else if (connectionsAsLong == maxThreadsAsLong * 95L / 100L) { log.log(Level.WARNING, threadConsumptionMessage(connections, maxThreads, "95")); } else if (connectionsAsLong == maxThreadsAsLong) { log.log(Level.WARNING, threadConsumptionMessage(connections, maxThreads, "100")); } } private String threadConsumptionMessage(int connections, int maxThreads, String percentage) { return percentage + "% of possible search connections (" + connections + " of maximum " + maxThreads + ") currently active."; } private void log(String request, Query query, Throwable e) { if (e.getStackTrace().length == 0) { log.log(Level.SEVERE, "Failed executing " + query.toDetailString() + " [" + request + "], received exception with no context", e); } else { log.log(Level.SEVERE, "Failed executing " + query.toDetailString() + " [" + request + "]", e); } } private Result validateQuery(Query query) { DefaultProperties.requireNotPresentIn(query.getHttpRequest().propertyMap()); int maxHits = query.properties().getInteger(DefaultProperties.MAX_HITS); int maxOffset = query.properties().getInteger(DefaultProperties.MAX_OFFSET); if (query.getHits() > maxHits) { return new Result(query, ErrorMessage.createIllegalQuery(query.getHits() + " hits requested, configured limit: " + maxHits + ". See https: } else if (query.getOffset() > maxOffset) { return new Result(query, ErrorMessage.createIllegalQuery("Offset of " + query.getOffset() + " requested, configured limit: " + maxOffset + ". 
See https: } return null; } private void traceExecutionTimes(Query query, Result result) { if (query.getTraceLevel() < 3) return; ElapsedTime elapsedTime = result.getElapsedTime(); long now = System.currentTimeMillis(); if (elapsedTime.firstFill() != 0) { query.trace("Query time " + query + ": " + (elapsedTime.firstFill() - elapsedTime.first()) + " ms", false, 3); query.trace("Summary fetch time " + query + ": " + (now - elapsedTime.firstFill()) + " ms", false, 3); } else { query.trace("Total search time " + query + ": " + (now - elapsedTime.first()) + " ms", false, 3); } } private void traceVespaVersion(Query query) { query.trace("Vespa version: " + Vtag.currentVersion, false, 4); } public SearchChainRegistry getSearchChainRegistry() { return executionFactory.searchChainRegistry(); } static private String getMediaType(HttpRequest request) { String header = request.getHeader(com.yahoo.jdisc.http.HttpHeaders.Names.CONTENT_TYPE); if (header == null) { return ""; } int semi = header.indexOf(';'); if (semi != -1) { header = header.substring(0, semi); } return com.yahoo.text.Lowercase.toLowerCase(header.trim()); } /** Add properties POSTed as a JSON payload, if any, to the request map */ private Map<String, String> requestMapFromRequest(HttpRequest request) { if (request.getMethod() != com.yahoo.jdisc.http.HttpRequest.Method.POST || ! 
JSON_CONTENT_TYPE.equals(getMediaType(request))) return request.propertyMap(); Map<String, String> requestMap = new Json2SingleLevelMap(request.getData()).parse(); requestMap.putAll(request.propertyMap()); if (requestMap.containsKey("yql") && (requestMap.containsKey("select.where") || requestMap.containsKey("select.grouping")) ) throw new IllegalInputException("Illegal query: Query contains both yql and select parameter"); if (requestMap.containsKey("query") && (requestMap.containsKey("select.where") || requestMap.containsKey("select.grouping")) ) throw new IllegalInputException("Illegal query: Query contains both query and select parameter"); return requestMap; } @Deprecated public void createRequestMapping(Inspector inspector, Map<String, String> map, String parent) { try { new Json2SingleLevelMap(new ByteArrayInputStream(inspector.toString().getBytes(StandardCharsets.UTF_8))).parse(map, parent); } catch (IOException e) { throw new RuntimeException("Failed creating request mapping for parent '" + parent + "'", e); } } @Override public RequestHandlerSpec requestHandlerSpec() { return REQUEST_HANDLER_SPEC; } private static AclMapping aclRequestMapper() { return HttpMethodAclMapping.standard() .override(com.yahoo.jdisc.http.HttpRequest.Method.POST, AclMapping.Action.READ) .build(); } private Map<String, Embedder> toMap(ComponentRegistry<Embedder> embedders) { var map = embedders.allComponentsById().entrySet().stream() .collect(Collectors.toMap(e -> e.getKey().stringValue(), Map.Entry::getValue)); if (map.size() > 1) { map.remove(DefaultEmbedderProvider.class.getName()); } return Collections.unmodifiableMap(map); } }
Empty string is so ugly though.. Why not 'default'?
public void cloudAccount() { String r = """ <deployment version='1.0' cloud-account='100000000000,gcp:foobar'> <instance id='alpha'> <prod cloud-account='800000000000'> <region>us-east-1</region> </prod> </instance> <instance id='beta' cloud-account='200000000000'> <staging cloud-account='gcp:barbaz'/> <perf cloud-account='700000000000'/> <prod> <region>us-west-1</region> <region cloud-account='default'>us-west-2</region> <region cloud-account=''>us-west-3</region> </prod> </instance> <instance id='main'> <test cloud-account='500000000000'/> <dev cloud-account='400000000000'/> <prod> <region cloud-account='300000000000'>us-east-1</region> <region>eu-west-1</region> </prod> </instance> </deployment> """; DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(Map.of(AWS, CloudAccount.from("100000000000"), GCP, CloudAccount.from("gcp:foobar")), spec.cloudAccounts()); assertCloudAccount("800000000000", spec, AWS, "alpha", prod, "us-east-1"); assertCloudAccount("", spec, GCP, "alpha", prod, "us-east-1"); assertCloudAccount("200000000000", spec, AWS, "beta", prod, "us-west-1"); assertCloudAccount("", spec, AWS, "beta", staging, "default"); assertCloudAccount("gcp:barbaz", spec, GCP, "beta", staging, "default"); assertCloudAccount("700000000000", spec, AWS, "beta", perf, "default"); assertCloudAccount("200000000000", spec, AWS, "beta", dev, "default"); assertCloudAccount("300000000000", spec, AWS, "main", prod, "us-east-1"); assertCloudAccount("100000000000", spec, AWS, "main", prod, "eu-west-1"); assertCloudAccount("400000000000", spec, AWS, "main", dev, "default"); assertCloudAccount("500000000000", spec, AWS, "main", test, "default"); assertCloudAccount("100000000000", spec, AWS, "main", staging, "default"); assertCloudAccount("default", spec, AWS, "beta", prod, "us-west-2"); assertCloudAccount("", spec, GCP, "beta", prod, "us-west-2"); assertCloudAccount("", spec, AWS, "beta", prod, "us-west-3"); assertCloudAccount("", spec, GCP, "beta", prod, "us-west-3"); }
assertCloudAccount("", spec, GCP, "alpha", prod, "us-east-1");
public void cloudAccount() { String r = """ <deployment version='1.0' cloud-account='100000000000,gcp:foobar'> <instance id='alpha'> <prod cloud-account='800000000000'> <region>us-east-1</region> </prod> </instance> <instance id='beta' cloud-account='200000000000'> <staging cloud-account='gcp:barbaz'/> <perf cloud-account='700000000000'/> <prod> <region>us-west-1</region> <region cloud-account='default'>us-west-2</region> <region cloud-account=''>us-west-3</region> </prod> </instance> <instance id='main'> <test cloud-account='500000000000'/> <dev cloud-account='400000000000'/> <prod> <region cloud-account='300000000000'>us-east-1</region> <region>eu-west-1</region> </prod> </instance> </deployment> """; DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(Map.of(AWS, CloudAccount.from("100000000000"), GCP, CloudAccount.from("gcp:foobar")), spec.cloudAccounts()); assertCloudAccount("800000000000", spec, AWS, "alpha", prod, "us-east-1"); assertCloudAccount("", spec, GCP, "alpha", prod, "us-east-1"); assertCloudAccount("200000000000", spec, AWS, "beta", prod, "us-west-1"); assertCloudAccount("", spec, AWS, "beta", staging, "default"); assertCloudAccount("gcp:barbaz", spec, GCP, "beta", staging, "default"); assertCloudAccount("700000000000", spec, AWS, "beta", perf, "default"); assertCloudAccount("200000000000", spec, AWS, "beta", dev, "default"); assertCloudAccount("300000000000", spec, AWS, "main", prod, "us-east-1"); assertCloudAccount("100000000000", spec, AWS, "main", prod, "eu-west-1"); assertCloudAccount("400000000000", spec, AWS, "main", dev, "default"); assertCloudAccount("500000000000", spec, AWS, "main", test, "default"); assertCloudAccount("100000000000", spec, AWS, "main", staging, "default"); assertCloudAccount("default", spec, AWS, "beta", prod, "us-west-2"); assertCloudAccount("", spec, GCP, "beta", prod, "us-west-2"); assertCloudAccount("", spec, AWS, "beta", prod, "us-west-3"); assertCloudAccount("", spec, GCP, "beta", prod, "us-west-3"); }
class DeploymentSpecTest { @Test public void simpleSpec() { String specXml = "<deployment version='1.0'>" + " <instance id='default'>" + " <test/>" + " </instance>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.requireInstance("default").steps().size()); assertFalse(spec.majorVersion().isPresent()); assertTrue(spec.requireInstance("default").steps().get(0).concerns(test)); assertTrue(spec.requireInstance("default").concerns(test, Optional.empty())); assertTrue(spec.requireInstance("default").concerns(test, Optional.of(RegionName.from("region1")))); assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty())); assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty())); assertFalse(spec.requireInstance("default").globalServiceId().isPresent()); } @Test public void specPinningMajorVersion() { String specXml = "<deployment version='1.0' major-version='6'>" + " <instance id='default'>" + " <test/>" + " </instance>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.requireInstance("default").steps().size()); assertTrue(spec.majorVersion().isPresent()); assertEquals(6, (int)spec.majorVersion().get()); } @Test public void stagingSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <staging/>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(1, spec.steps().size()); assertEquals(1, spec.requireInstance("default").steps().size()); assertTrue(spec.requireInstance("default").steps().get(0).concerns(staging)); assertFalse(spec.requireInstance("default").concerns(test, Optional.empty())); assertTrue(spec.requireInstance("default").concerns(staging, Optional.empty())); 
assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty())); assertFalse(spec.requireInstance("default").globalServiceId().isPresent()); } @Test public void minimalProductionSpec() { StringReader r = new StringReader( """ <deployment version='1.0'> <instance id='default'> <prod> <region active='false'>us-east1</region> <region active='true'>us-west1</region> </prod> </instance> </deployment> """); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(1, spec.steps().size()); assertEquals(2, spec.requireInstance("default").steps().size()); assertTrue(spec.requireInstance("default").steps().get(0).concerns(prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(0)).active()); assertTrue(spec.requireInstance("default").steps().get(1).concerns(prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(1)).active()); assertFalse(spec.requireInstance("default").concerns(test, Optional.empty())); assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty())); assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.requireInstance("default").globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("default").upgradePolicy()); assertEquals(DeploymentSpec.RevisionTarget.latest, spec.requireInstance("default").revisionTarget()); assertEquals(DeploymentSpec.RevisionChange.whenFailing, spec.requireInstance("default").revisionChange()); assertEquals(DeploymentSpec.UpgradeRollout.separate, spec.requireInstance("default").upgradeRollout()); assertEquals(0, 
spec.requireInstance("default").minRisk()); assertEquals(0, spec.requireInstance("default").maxRisk()); assertEquals(8, spec.requireInstance("default").maxIdleHours()); } @Test public void specWithTags() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='a' tags='tag1 tag2'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + " <instance id='b' tags='tag3'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(Tags.fromString("tag1 tag2"), spec.requireInstance("a").tags()); assertEquals(Tags.fromString("tag3"), spec.requireInstance("b").tags()); } @Test public void maximalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertCorrectFirstInstance(spec.requireInstance("default")); } @Test public void productionTests() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " <delay hours='1' />" + " <test>us-west-1</test>" + " <test>us-east-1</test>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> instanceSteps = spec.steps().get(0).steps(); assertEquals(7, instanceSteps.size()); assertEquals("test", instanceSteps.get(0).toString()); assertEquals("staging", instanceSteps.get(1).toString()); 
assertEquals("prod.us-east-1", instanceSteps.get(2).toString()); assertEquals("prod.us-west-1", instanceSteps.get(3).toString()); assertEquals("delay PT1H", instanceSteps.get(4).toString()); assertEquals("tests for prod.us-west-1", instanceSteps.get(5).toString()); assertEquals("tests for prod.us-east-1", instanceSteps.get(6).toString()); } @Test(expected = IllegalArgumentException.class) public void duplicateProductionTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-east1</region>" + " <test>us-east1</test>" + " <test>us-east1</test>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void productionTestBeforeDeployment() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod>" + " <test>us-east1</test>" + " <region active='true'>us-east1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void productionTestInParallelWithDeployment() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod>" + " <parallel>" + " <region active='true'>us-east1</region>" + " <test>us-east1</test>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void maximalProductionSpecMultipleInstances() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + " <instance id='instance2'>" + " <prod>" + " <region active='true'>us-central1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = 
DeploymentSpec.fromXml(r); assertCorrectFirstInstance(spec.requireInstance("instance1")); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(1, instance2.steps().size()); assertEquals(1, instance2.zones().size()); assertTrue(instance2.steps().get(0).concerns(prod, Optional.of(RegionName.from("us-central1")))); } @Test public void multipleInstancesShortForm() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1, instance2'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertCorrectFirstInstance(spec.requireInstance("instance1")); assertCorrectFirstInstance(spec.requireInstance("instance2")); } private void assertCorrectFirstInstance(DeploymentInstanceSpec instance) { assertEquals(5, instance.steps().size()); assertEquals(4, instance.zones().size()); assertTrue(instance.steps().get(0).concerns(test)); assertTrue(instance.steps().get(1).concerns(staging)); assertTrue(instance.steps().get(2).concerns(prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)instance.steps().get(2)).active()); assertTrue(instance.steps().get(3) instanceof DeploymentSpec.Delay); assertEquals(3 * 60 * 60 + 30 * 60, instance.steps().get(3).delay().getSeconds()); assertTrue(instance.steps().get(4).concerns(prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)instance.steps().get(4)).active()); assertTrue(instance.concerns(test, Optional.empty())); assertTrue(instance.concerns(test, Optional.of(RegionName.from("region1")))); assertTrue(instance.concerns(staging, Optional.empty())); assertTrue(instance.concerns(prod, Optional.of(RegionName.from("us-east1")))); assertTrue(instance.concerns(prod, 
Optional.of(RegionName.from("us-west1")))); assertFalse(instance.concerns(prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(instance.globalServiceId().isPresent()); } @Test public void productionSpecWithGlobalServiceId() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod global-service-id='query'>" + " <region active='true'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(spec.requireInstance("default").globalServiceId(), Optional.of("query")); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <test global-service-id='query' />" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInStaging() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <staging global-service-id='query' />" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void productionSpecWithGlobalServiceIdBeforeStaging() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <test/>" + " <prod global-service-id='qrs'>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + " <staging/>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("qrs", spec.requireInstance("default").globalServiceId().get()); } @Test public void productionSpecWithUpgradeRevisionSettings() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade revision-change='when-clear' revision-target='next' min-risk='3' max-risk='12' 
max-idle-hours='32' />" + " </instance>" + " <instance id='custom'>" + " <upgrade revision-change='always' />" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("next", spec.requireInstance("default").revisionTarget().toString()); assertEquals("latest", spec.requireInstance("custom").revisionTarget().toString()); assertEquals("whenClear", spec.requireInstance("default").revisionChange().toString()); assertEquals("always", spec.requireInstance("custom").revisionChange().toString()); assertEquals(3, spec.requireInstance("default").minRisk()); assertEquals(12, spec.requireInstance("default").maxRisk()); assertEquals(32, spec.requireInstance("default").maxIdleHours()); } @Test public void productionSpecsWithIllegalRevisionSettings() { assertEquals("revision-change must be 'when-clear' when max-risk is specified, but got: 'always'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade revision-change='always' revision-target='next' min-risk='3' max-risk='12' max-idle-hours='32' />" + " </instance>" + "</deployment>")) .getMessage()); assertEquals("revision-target must be 'next' when max-risk is specified, but got: 'latest'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade revision-change='when-clear' min-risk='3' max-risk='12' max-idle-hours='32' />" + " </instance>" + "</deployment>")) .getMessage()); assertEquals("maximum risk cannot be less than minimum risk score, but got: '12'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade revision-change='when-clear' revision-target='next' min-risk='13' max-risk='12' max-idle-hours='32' />" + " </instance>" + "</deployment>")) .getMessage()); assertEquals("maximum risk cannot be less than minimum risk score, but got: '0'", 
assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade min-risk='3' />" + " </instance>" + "</deployment>")) .getMessage()); } @Test public void productionSpecWithUpgradeRollout() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade rollout='leading' />" + " </instance>" + " <instance id='aggressive'>" + " <upgrade rollout='simultaneous' />" + " </instance>" + " <instance id='custom'/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("leading", spec.requireInstance("default").upgradeRollout().toString()); assertEquals("separate", spec.requireInstance("custom").upgradeRollout().toString()); assertEquals("simultaneous", spec.requireInstance("aggressive").upgradeRollout().toString()); } @Test public void productionSpecWithUpgradePolicy() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade policy='canary'/>" + " </instance>" + " <instance id='custom'/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.requireInstance("default").upgradePolicy().toString()); assertEquals("defaultPolicy", spec.requireInstance("custom").upgradePolicy().toString()); } @Test public void upgradePolicyDefault() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <upgrade policy='canary' rollout='leading' revision-target='next' revision-change='when-clear' />" + " <instance id='instance1'/>" + " <instance id='instance2'>" + " <upgrade policy='conservative' rollout='separate' revision-target='latest' revision-change='when-failing' />" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.requireInstance("instance1").upgradePolicy().toString()); assertEquals("conservative", spec.requireInstance("instance2").upgradePolicy().toString()); assertEquals("next", 
spec.requireInstance("instance1").revisionTarget().toString()); assertEquals("latest", spec.requireInstance("instance2").revisionTarget().toString()); assertEquals("whenClear", spec.requireInstance("instance1").revisionChange().toString()); assertEquals("whenFailing", spec.requireInstance("instance2").revisionChange().toString()); assertEquals("leading", spec.requireInstance("instance1").upgradeRollout().toString()); assertEquals("separate", spec.requireInstance("instance2").upgradeRollout().toString()); } @Test public void maxDelayExceeded() { try { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <delay hours='47'/>" + " <region active='true'>us-central-1</region>" + " <delay minutes='59' seconds='61'/>" + " <region active='true'>us-east-3</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); fail("Expected exception due to exceeding the max total delay"); } catch (IllegalArgumentException e) { assertEquals("The total delay specified is PT48H1S but max 48 hours is allowed", e.getMessage()); } } @Test public void onlyAthenzServiceDefinedInInstance() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <instance id='default' athenz-service='service' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals(1, spec.instances().size()); DeploymentInstanceSpec instance = spec.instances().get(0); assertEquals("default", instance.name().value()); assertEquals("service", instance.athenzService(prod, RegionName.defaultName()).get().value()); } @Test public void productionSpecWithParallelDeployments() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <parallel>" + " <region active='true'>us-central-1</region>" + " 
<region active='true'>us-east-3</region>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentSpec.ParallelSteps parallelSteps = ((DeploymentSpec.ParallelSteps) spec.requireInstance("default").steps().get(1)); assertEquals(2, parallelSteps.zones().size()); assertEquals(RegionName.from("us-central-1"), parallelSteps.zones().get(0).region().get()); assertEquals(RegionName.from("us-east-3"), parallelSteps.zones().get(1).region().get()); } @Test public void testAndStagingOutsideAndInsideInstance() { StringReader r = new StringReader( "<deployment>" + " <test/>" + " <staging/>" + " <instance id='instance0'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + " <instance id='instance1'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(4, steps.size()); assertEquals("test", steps.get(0).toString()); assertEquals("staging", steps.get(1).toString()); assertEquals("instance 'instance0'", steps.get(2).toString()); assertEquals("instance 'instance1'", steps.get(3).toString()); List<DeploymentSpec.Step> instance0Steps = ((DeploymentInstanceSpec)steps.get(2)).steps(); assertEquals(1, instance0Steps.size()); assertEquals("prod.us-west-1", instance0Steps.get(0).toString()); List<DeploymentSpec.Step> instance1Steps = ((DeploymentInstanceSpec)steps.get(3)).steps(); assertEquals(3, instance1Steps.size()); assertEquals("test", instance1Steps.get(0).toString()); assertEquals("staging", instance1Steps.get(1).toString()); assertEquals("prod.us-west-1", instance1Steps.get(2).toString()); } @Test public void nestedParallelAndSteps() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <staging />" + " <instance id='instance' athenz-service='in-service'>" + 
" <prod>" + " <parallel>" + " <region active='true'>us-west-1</region>" + " <steps>" + " <region active='true'>us-east-3</region>" + " <delay hours='2' />" + " <region active='true'>eu-west-1</region>" + " <delay hours='2' />" + " </steps>" + " <steps>" + " <delay hours='3' />" + " <region active='true'>aws-us-east-1a</region>" + " <parallel>" + " <region active='true' athenz-service='no-service'>ap-northeast-1</region>" + " <region active='true'>ap-southeast-2</region>" + " <test>aws-us-east-1a</test>" + " </parallel>" + " </steps>" + " <delay hours='3' minutes='30' />" + " </parallel>" + " <region active='true'>us-north-7</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(2, steps.size()); assertEquals("staging", steps.get(0).toString()); assertEquals("instance 'instance'", steps.get(1).toString()); assertEquals(Duration.ofHours(4), steps.get(1).delay()); List<DeploymentSpec.Step> instanceSteps = steps.get(1).steps(); assertEquals(2, instanceSteps.size()); assertEquals("4 parallel steps", instanceSteps.get(0).toString()); assertEquals("prod.us-north-7", instanceSteps.get(1).toString()); List<DeploymentSpec.Step> parallelSteps = instanceSteps.get(0).steps(); assertEquals(4, parallelSteps.size()); assertEquals("prod.us-west-1", parallelSteps.get(0).toString()); assertEquals("4 steps", parallelSteps.get(1).toString()); assertEquals("3 steps", parallelSteps.get(2).toString()); assertEquals("delay PT3H30M", parallelSteps.get(3).toString()); List<DeploymentSpec.Step> firstSerialSteps = parallelSteps.get(1).steps(); assertEquals(4, firstSerialSteps.size()); assertEquals("prod.us-east-3", firstSerialSteps.get(0).toString()); assertEquals("delay PT2H", firstSerialSteps.get(1).toString()); assertEquals("prod.eu-west-1", firstSerialSteps.get(2).toString()); assertEquals("delay PT2H", firstSerialSteps.get(3).toString()); List<DeploymentSpec.Step> 
secondSerialSteps = parallelSteps.get(2).steps(); assertEquals(3, secondSerialSteps.size()); assertEquals("delay PT3H", secondSerialSteps.get(0).toString()); assertEquals("prod.aws-us-east-1a", secondSerialSteps.get(1).toString()); assertEquals("3 parallel steps", secondSerialSteps.get(2).toString()); List<DeploymentSpec.Step> innerParallelSteps = secondSerialSteps.get(2).steps(); assertEquals(3, innerParallelSteps.size()); assertEquals("prod.ap-northeast-1", innerParallelSteps.get(0).toString()); assertEquals("no-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-northeast-1")).get().value()); assertEquals("prod.ap-southeast-2", innerParallelSteps.get(1).toString()); assertEquals("in-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-southeast-2")).get().value()); assertEquals("tests for prod.aws-us-east-1a", innerParallelSteps.get(2).toString()); } @Test public void parallelInstances() { StringReader r = new StringReader( "<deployment>" + " <parallel>" + " <instance id='instance0'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + " <instance id='instance1'>" + " <prod>" + " <region active='true'>us-east-3</region>" + " </prod>" + " </instance>" + " </parallel>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(1, steps.size()); assertEquals("2 parallel steps", steps.get(0).toString()); List<DeploymentSpec.Step> parallelSteps = steps.get(0).steps(); assertEquals("instance 'instance0'", parallelSteps.get(0).toString()); assertEquals("instance 'instance1'", parallelSteps.get(1).toString()); } @Test public void instancesWithDelay() { StringReader r = new StringReader( "<deployment>" + " <instance id='instance0'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + " <delay hours='12'/>" + " <instance id='instance1'>" + " <prod>" + " <region 
active='true'>us-east-3</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(3, steps.size()); assertEquals("instance 'instance0'", steps.get(0).toString()); assertEquals("delay PT12H", steps.get(1).toString()); assertEquals("instance 'instance1'", steps.get(2).toString()); } @Test public void productionSpecWithDuplicateRegions() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <parallel>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); try { DeploymentSpec.fromXml(r); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage()); } } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIncreasinglyStrictUpgradePolicies() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2' />" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIncreasinglyStrictUpgradePoliciesInParallel() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance0'/>" + " <parallel>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2'>" + " <upgrade policy='canary'/>" + " </instance>" + " </parallel>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIncreasinglyStrictUpgradePoliciesAfterParallel() { StringReader r = new StringReader( "<deployment 
version='1.0'>" + " <parallel>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2'>" + " <upgrade policy='canary'/>" + " </instance>" + " </parallel>" + " <instance id='instance3'/>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void deploymentSpecWithDifferentUpgradePoliciesInParallel() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <parallel>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2' />" + " </parallel>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(DeploymentSpec.UpgradePolicy.conservative, spec.requireInstance("instance1").upgradePolicy()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("instance2").upgradePolicy()); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIllegallyOrderedDeploymentSpec1() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " <block-change days='mon,tue' hours='15-16'/>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIllegallyOrderedDeploymentSpec2() { StringReader r = new StringReader( "<deployment>\n" + " <instance id='default'>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " <test/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void deploymentSpecWithChangeBlocker() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <block-change revision='false' days='mon,tue' hours='15-16'/>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " 
<block-change days='mon-sun' hours='0-23' time-zone='CET' from-date='2022-01-01' to-date='2022-01-15'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(3, spec.requireInstance("default").changeBlocker().size()); assertTrue(spec.requireInstance("default").changeBlocker().get(0).blocksVersions()); assertFalse(spec.requireInstance("default").changeBlocker().get(0).blocksRevisions()); assertEquals(ZoneId.of("UTC"), spec.requireInstance("default").changeBlocker().get(0).window().zone()); assertTrue(spec.requireInstance("default").changeBlocker().get(1).blocksVersions()); assertTrue(spec.requireInstance("default").changeBlocker().get(1).blocksRevisions()); assertEquals(ZoneId.of("CET"), spec.requireInstance("default").changeBlocker().get(1).window().zone()); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T14:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T15:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T16:15:30.00Z"))); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T17:15:30.00Z"))); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T09:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T08:15:30.00Z"))); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T10:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2022-01-15T16:00:00.00Z"))); } @Test public void changeBlockerInheritance() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <block-change revision='false' days='mon,tue' hours='15-16'/>" + " <instance id='instance1'>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " </instance>" + " <instance 
id='instance2'>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); String inheritedChangeBlocker = "change blocker revision=false version=true window=time window for hour(s) " + "[15, 16] on [monday, tuesday] in time zone UTC and date range [any date, any date]"; assertEquals(2, spec.requireInstance("instance1").changeBlocker().size()); assertEquals(inheritedChangeBlocker, spec.requireInstance("instance1").changeBlocker().get(0).toString()); assertEquals("change blocker revision=true version=true window=time window for hour(s) [10] on " + "[saturday] in time zone CET and date range [any date, any date]", spec.requireInstance("instance1").changeBlocker().get(1).toString()); assertEquals(1, spec.requireInstance("instance2").changeBlocker().size()); assertEquals(inheritedChangeBlocker, spec.requireInstance("instance2").changeBlocker().get(0).toString()); } @Test public void athenzConfigIsReadFromDeployment() { StringReader r = new StringReader( "<deployment athenz-domain='domain' athenz-service='service'>" + " <instance id='instance1'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals("service", spec.athenzService().get().value()); assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value()); } @Test public void athenzConfigPropagatesThroughParallelZones() { StringReader r = new StringReader( "<deployment athenz-domain='domain' athenz-service='service'>" + " <instance id='instance1'>" + " <prod athenz-service='prod-service'>" + " <region active='true'>us-central-1</region>" + " <parallel>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-east-3</region>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = 
DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals("service", spec.athenzService().get().value()); assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-central-1")).get().value()); assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value()); assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-east-3")).get().value()); } @Test public void athenzConfigPropagatesThroughParallelZonesAndInstances() { String r = """ <deployment athenz-domain='domain' athenz-service='service'> <parallel> <instance id='instance1'> <prod> <parallel> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </parallel> </prod> </instance> <instance id='instance2'> <prod> <parallel> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </parallel> </prod> </instance> </parallel> </deployment> """; DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value()); assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-east-3")).get().value()); assertEquals("service", spec.requireInstance("instance2").athenzService(prod, RegionName.from("us-east-3")).get().value()); } @Test public void athenzConfigIsReadFromInstance() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <instance id='default' athenz-service='service'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals(Optional.empty(), spec.athenzService()); 
assertEquals("service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value()); } @Test public void athenzServiceIsOverriddenFromEnvironment() { StringReader r = new StringReader( "<deployment athenz-domain='domain' athenz-service='unused-service'>" + " <instance id='default' athenz-service='service'>" + " <test />" + " <staging athenz-service='staging-service' />" + " <prod athenz-service='prod-service'>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("service", spec.requireInstance("default").athenzService(test, RegionName.from("us-east-1")).get().value()); assertEquals("staging-service", spec.requireInstance("default").athenzService(staging, RegionName.from("us-north-1")).get().value()); assertEquals("prod-service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value()); } @Test(expected = IllegalArgumentException.class) public void missingAthenzServiceFails() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void athenzServiceWithoutDomainFails() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <prod athenz-service='service'>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void noNotifications() { assertEquals(Notifications.none(), DeploymentSpec.fromXml("<deployment>" + " <instance id='default'/>" + "</deployment>").requireInstance("default").notifications()); } @Test public void emptyNotifications() { DeploymentSpec spec = DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " 
<notifications/>" + " </instance>" + "</deployment>"); assertEquals(Notifications.none(), spec.requireInstance("default").notifications()); } @Test public void someNotifications() { DeploymentSpec spec = DeploymentSpec.fromXml("<deployment>\n" + " <instance id='default'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@dev\" when=\"failing-commit\"/>" + " <email address=\"jane@dev\"/>" + " </notifications>" + " </instance>" + "</deployment>"); assertEquals(ImmutableSet.of(author), spec.requireInstance("default").notifications().emailRolesFor(failing)); assertEquals(ImmutableSet.of(author), spec.requireInstance("default").notifications().emailRolesFor(failingCommit)); assertEquals(ImmutableSet.of("john@dev", "jane@dev"), spec.requireInstance("default").notifications().emailAddressesFor(failingCommit)); assertEquals(ImmutableSet.of("jane@dev"), spec.requireInstance("default").notifications().emailAddressesFor(failing)); } @Test public void notificationsWithMultipleInstances() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@operator\"/>" + " </notifications>" + " </instance>" + " <instance id='instance2'>" + " <notifications when=\"failing-commit\">" + " <email role=\"author\"/>" + " <email address=\"mary@dev\"/>" + " </notifications>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentInstanceSpec instance1 = spec.requireInstance("instance1"); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failing)); assertEquals(Set.of("john@operator"), instance1.notifications().emailAddressesFor(failing)); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("mary@dev"), 
instance2.notifications().emailAddressesFor(failingCommit)); } @Test public void notificationsDefault() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <notifications>" + " <email role=\"author\" when=\"failing\"/>" + " <email address=\"mary@dev\"/>" + " </notifications>" + " <instance id='instance1'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@operator\" when=\"failing-commit\"/>" + " </notifications>" + " </instance>" + " <instance id='instance2'>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentInstanceSpec instance1 = spec.requireInstance("instance1"); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failing)); assertEquals(Set.of(), instance1.notifications().emailAddressesFor(failing)); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("john@operator"), instance1.notifications().emailAddressesFor(failingCommit)); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failing)); assertEquals(Set.of(), instance2.notifications().emailAddressesFor(failing)); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("mary@dev"), instance2.notifications().emailAddressesFor(failingCommit)); } @Test public void customTesterFlavor() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <test tester-flavor="d-1-4-20" /> <staging /> <prod tester-flavor="d-2-8-50"> <region active="false">us-north-7</region> </prod> </instance> </deployment>"""); assertEquals(Optional.of("d-1-4-20"), spec.requireInstance("default").steps().get(0).zones().get(0).testerFlavor()); assertEquals(Optional.empty(), spec.requireInstance("default").steps().get(1).zones().get(0).testerFlavor()); assertEquals(Optional.of("d-2-8-50"), 
spec.requireInstance("default").steps().get(2).zones().get(0).testerFlavor()); } @Test public void noEndpoints() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'/> </deployment> """); assertEquals(Collections.emptyList(), spec.requireInstance("default").endpoints()); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), defaultId(), ClusterSpec.Id.from("cluster"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), com.yahoo.config.provision.zone.ZoneId.from("test", "us"), ClusterSpec.Id.from("cluster"))); } @Test public void emptyEndpoints() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <endpoints/> </instance> </deployment>"""); assertEquals(List.of(), spec.requireInstance("default").endpoints()); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), defaultId(), ClusterSpec.Id.from("cluster"))); } @Test public void someEndpoints() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region active="true">us-east</region> </prod> <endpoints> <endpoint id="foo" container-id="bar"> <region>us-east</region> </endpoint> <endpoint id="nalle" container-id="frosk" /> <endpoint container-id="quux" /> <endpoint container-id='bax' type='zone' enabled='true' /> <endpoint container-id='froz' type='zone' enabled='false' /> <endpoint container-id='froz' type='private'> <region>us-east</region> <allow with='aws-private-link' arn='barn' /> <allow with='gcp-service-connect' project='nine' /> </endpoint> </endpoints> </instance> </deployment>"""); assertEquals( List.of("foo", "nalle", "default"), spec.requireInstance("default").endpoints().stream().map(Endpoint::endpointId).toList() ); assertEquals( List.of("bar", "frosk", "quux"), spec.requireInstance("default").endpoints().stream().map(Endpoint::containerId).toList() ); assertEquals(List.of(RegionName.from("us-east")), 
spec.requireInstance("default").endpoints().get(0).regions()); var zone = from(prod, RegionName.from("us-east")); var testZone = from(test, RegionName.from("us-east")); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("custom"), zone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), defaultId(), ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), zone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), testZone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.privateEndpoint, spec.zoneEndpoint(InstanceName.from("default"), testZone, ClusterSpec.Id.from("froz"))); assertEquals(new ZoneEndpoint(false, true, List.of(new AllowedUrn(AccessType.awsPrivateLink, "barn"), new AllowedUrn(AccessType.gcpServiceConnect, "nine"))), spec.zoneEndpoint(InstanceName.from("default"), zone, ClusterSpec.Id.from("froz"))); } @Test public void invalidEndpoints() { assertInvalidEndpoints("<endpoint id='FOO' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'FOO'"); assertInvalidEndpoints("<endpoint id='123' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got '123'"); assertInvalidEndpoints("<endpoint id='foo!' 
container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo!'"); assertInvalidEndpoints("<endpoint id='foo.bar' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo.bar'"); assertInvalidEndpoints("<endpoint id='foo--bar' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo--bar'"); assertInvalidEndpoints("<endpoint id='foo-' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo-'"); assertInvalidEndpoints("<endpoint id='foooooooooooo' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foooooooooooo'"); assertInvalidEndpoints("<endpoint id='foo' container-id='qrs'/><endpoint id='foo' container-id='qrs'/>", "Endpoint id 'foo' is specified multiple times"); assertInvalidEndpoints("<endpoint id='default' type='zone' container-id='foo' />", "Instance-level endpoint 'default': cannot declare 'id' with type 'zone' or 'private'"); assertInvalidEndpoints("<endpoint id='default' type='private' container-id='foo' />", "Instance-level endpoint 'default': cannot declare 'id' with type 'zone' or 'private'"); assertInvalidEndpoints("<endpoint type='zone' />", "Missing required attribute 'container-id' in 'endpoint'"); assertInvalidEndpoints("<endpoint type='private' />", "Missing required attribute 'container-id' in 'endpoint'"); assertInvalidEndpoints("<endpoint container-id='foo' type='zone'><allow /></endpoint>", "Instance-level endpoint 'default': only endpoints of type 'private' can specify 'allow' children"); assertInvalidEndpoints("<endpoint 
type='private' container-id='foo' enabled='true' />", "Instance-level endpoint 'default': only endpoints of type 'zone' can specify 'enabled'"); assertInvalidEndpoints("<endpoint type='zone' container-id='qrs'/><endpoint type='zone' container-id='qrs'/>", "Multiple zone endpoints (for all regions) declared for container id 'qrs'"); assertInvalidEndpoints("<endpoint type='private' container-id='qrs'><region>us</region></endpoint>" + "<endpoint type='private' container-id='qrs'><region>us</region></endpoint>", "Multiple private endpoints declared for container id 'qrs' in region 'us'"); assertInvalidEndpoints("<endpoint type='zone' container-id='qrs' />" + "<endpoint type='zone' container-id='qrs'><region>us</region></endpoint>", "Zone endpoint for container id 'qrs' declared both with region 'us', and for all regions."); } @Test public void validEndpoints() { assertEquals(List.of("default"), endpointIds("<endpoint container-id='qrs'/>")); assertEquals(List.of("default"), endpointIds("<endpoint id='' container-id='qrs'/>")); assertEquals(List.of("f"), endpointIds("<endpoint id='f' container-id='qrs'/>")); assertEquals(List.of("foo"), endpointIds("<endpoint id='foo' container-id='qrs'/>")); assertEquals(List.of("foo-bar"), endpointIds("<endpoint id='foo-bar' container-id='qrs'/>")); assertEquals(List.of("foo", "bar"), endpointIds("<endpoint id='foo' container-id='qrs'/><endpoint id='bar' container-id='qrs'/>")); assertEquals(List.of("fooooooooooo"), endpointIds("<endpoint id='fooooooooooo' container-id='qrs'/>")); } @Test public void endpointDefaultRegions() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region>us-east</region> <region>us-west</region> </prod> <endpoints> <endpoint id="foo" container-id="bar"> <region>us-east</region> </endpoint> <endpoint container-id="bar" type='private'> <region>us-east</region> </endpoint> <endpoint id="nalle" container-id="frosk" /> <endpoint container-id="quux" /> <endpoint 
container-id="quux" type='private' /> </endpoints> </instance> </deployment>"""); assertEquals(Set.of("us-east"), endpointRegions("foo", spec)); assertEquals(Set.of("us-east", "us-west"), endpointRegions("nalle", spec)); assertEquals(Set.of("us-east", "us-west"), endpointRegions("default", spec)); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-east"), ClusterSpec.Id.from("bar"))); assertEquals(new ZoneEndpoint(true, false, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-west"), ClusterSpec.Id.from("bar"))); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-east"), ClusterSpec.Id.from("quux"))); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-west"), ClusterSpec.Id.from("quux"))); assertEquals(new HashSet<>() {{ add(null); add(from("prod", "us-east")); }}, spec.requireInstance("default").zoneEndpoints().get(ClusterSpec.Id.from("bar")).keySet()); assertEquals(new HashSet<>() {{ add(null); }}, spec.requireInstance("default").zoneEndpoints().get(ClusterSpec.Id.from("quux")).keySet()); assertEquals(Set.of(ClusterSpec.Id.from("bar"), ClusterSpec.Id.from("quux")), spec.requireInstance("default").zoneEndpoints().keySet()); } @Test public void instanceEndpointDisallowsRegionAttributeOrInstanceTag() { String xmlForm = """ <deployment> <instance id='default'> <prod> <region active="true">us-east</region> <region active="true">us-west</region> </prod> <endpoints> <endpoint container-id="bar" %s> %s </endpoint> </endpoints> </instance> </deployment>"""; assertInvalid(String.format(xmlForm, "id='foo' region='us-east'", "<region>us-east</region>"), "Instance-level endpoint 'foo': invalid 'region' attribute"); assertInvalid(String.format(xmlForm, "id='foo'", "<instance>us-east</instance>"), "Instance-level endpoint 'foo': invalid element 
'instance'"); assertInvalid(String.format(xmlForm, "type='zone'", "<instance>us-east</instance>"), "Instance-level endpoint 'default': invalid element 'instance'"); assertInvalid(String.format(xmlForm, "type='private'", "<instance>us-east</instance>"), "Instance-level endpoint 'default': invalid element 'instance'"); } @Test public void applicationLevelEndpointValidation() { String xmlForm = """ <deployment> <instance id="beta"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <instance id="main"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <endpoints> <endpoint id="foo" container-id="qrs" %s> <instance %s %s>%s</instance> %s </endpoint> </endpoints> </deployment> """; assertInvalid(String.format(xmlForm, "", "weight='1'", "", "main", ""), "'region' attribute must be declared on either <endpoint> or <instance> tag"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "region='us-west-1'", "main", ""), "'region' attribute must be declared on either <endpoint> or <instance> tag"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "", "", "main", ""), "Missing required attribute 'weight' in 'instance"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "", "", ""), "Application-level endpoint 'foo': empty 'instance' element"); assertInvalid(String.format(xmlForm, "region='invalid'", "weight='1'", "", "main", ""), "Application-level endpoint 'foo': targets undeclared region 'invalid' in instance 'main'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='foo'", "", "main", ""), "Application-level endpoint 'foo': invalid weight value 'foo'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "", "main", "<region>us-east-3</region>"), "Application-level endpoint 'foo': invalid element 'region'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", 
"weight='0'", "", "main", ""), "Application-level endpoint 'foo': sum of all weights must be positive, got 0"); assertInvalid(String.format(xmlForm, "type='zone'", "weight='1'", "", "main", ""), "Endpoints at application level cannot be of type 'zone'"); assertInvalid(String.format(xmlForm, "type='private'", "weight='1'", "", "main", ""), "Endpoints at application level cannot be of type 'private'"); } @Test public void cannotTargetDisabledEndpoints() { assertEquals("Instance-level endpoint 'default': all eligible zone endpoints have 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint container-id='id' /> <endpoint type='zone' container-id='id' enabled='false' /> </endpoints> </instance> </deployment> """)) .getMessage()); assertEquals("Instance-level endpoint 'default': targets zone endpoint in 'us' with 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint container-id='id'> <region>us</region> </endpoint> <endpoint type='zone' container-id='id' enabled='false' /> </endpoints> </instance> </deployment> """)) .getMessage()); assertEquals("Application-level endpoint 'default': targets 'us' in 'default', but its zone endpoint has 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint type='zone' container-id='id' enabled='false'> <region>us</region> </endpoint> </endpoints> </instance> <endpoints> <endpoint container-id='id' region='us'> <instance weight='1'>default</instance> </endpoint> </endpoints> </deployment> """)) .getMessage()); } @Test public void 
applicationLevelEndpoint() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id="beta"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <instance id="main"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> <endpoints> <endpoint id="glob" container-id="music"/> </endpoints> </instance> <endpoints> <endpoint id="foo" container-id="movies" region='us-west-1'> <instance weight="2">beta</instance> <instance weight="8">main</instance> </endpoint> <endpoint id="bar" container-id="music" region='us-east-3'> <instance weight="10">main</instance> </endpoint> <endpoint id="baz" container-id="moose"> <instance weight="1" region='us-west-1'>main</instance> <instance weight="2" region='us-east-3'>main</instance> <instance weight="3" region='us-west-1'>beta</instance> </endpoint> </endpoints> </deployment> """); assertEquals(List.of(new Endpoint("foo", "movies", Level.application, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("beta"), 2), new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 8))), new Endpoint("bar", "music", Level.application, List.of(new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 10))), new Endpoint("baz", "moose", Level.application, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 1), new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 2), new Target(RegionName.from("us-west-1"), InstanceName.from("beta"), 3)))), spec.endpoints()); assertEquals(List.of(new Endpoint("glob", "music", Level.instance, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 1), new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 1)))), spec.requireInstance("main").endpoints()); } @Test public void disallowExcessiveUpgradeBlocking() { List<String> specs = List.of( """ <deployment> <block-change/> </deployment>""", """ 
<deployment> <block-change days="mon-wed"/> <block-change days="tue-sun"/> </deployment>""", """ <deployment> <block-change to-date="2023-01-01"/> </deployment>""", """ <deployment> <block-change days="sat-sun"/> <block-change days="mon-fri" hours="0-10" from-date="2023-01-01" to-date="2023-01-15"/> <block-change days="mon-fri" hours="11-23" from-date="2023-01-01" to-date="2023-01-15"/> <block-change from-date="2023-01-14" to-date="2023-01-31"/></deployment>""" ); ManualClock clock = new ManualClock(); clock.setInstant(Instant.parse("2022-01-05T15:00:00.00Z")); for (var spec : specs) { assertInvalid(spec, "Cannot block Vespa upgrades for longer than 21 consecutive days", clock); } } @Test public void testDeployableHash() { assertEquals(DeploymentSpec.fromXml(""" <deployment> <instance id='default' /> </deployment>""").deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' tags=' '> <test /> <staging tester-flavor='2-8-50' /> <block-change days='mon' /> <upgrade policy='canary' revision-target='next' revision-change='when-clear' rollout='simultaneous' /> <prod /> <notifications> <email role='author' /> <email address='dev@duff' /> </notifications> </instance> </deployment>""").deployableHashCode()); assertEquals(DeploymentSpec.fromXml(""" <deployment> <parallel> <instance id='one'> <prod> <region>name</region> </prod> </instance> <instance id='two' /> </parallel> </deployment>""").deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='one'> <prod> <steps> <region>name</region> <delay hours='3' /> <test>name</test> </steps> </prod> </instance> <instance id='two' /></deployment>""").deployableHashCode()); String referenceSpec = """ <deployment> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>"""; assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml("<deployment />").deployableHashCode()); 
assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' /> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' tags='tag1'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='custom'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='custom'> <prod> <region>other</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment major-version='9'> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain' athenz-service='service'> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain'> <instance id='default' athenz-service='service'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain'> <instance id='default'> <prod athenz-service='prod'> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); 
assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod global-service-id='service'> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region>name</region> </prod> <endpoints> <endpoint container-id="quux" /> </endpoints> </instance> </deployment>""").deployableHashCode()); } @Test @Test public void hostTTL() { String r = """ <deployment version='1.0' cloud-account='100000000000' empty-host-ttl='1h'> <instance id='alpha'> <staging /> <prod empty-host-ttl='1m'> <region>us-east</region> <region empty-host-ttl='2m'>us-west</region> <test>us-east</test> <test empty-host-ttl='3m'>us-west</test> </prod> </instance> <instance id='beta'> <staging empty-host-ttl='3d'/> <perf empty-host-ttl='4h'/> <prod> <region>us-east</region> <region empty-host-ttl='0d'>us-west</region> </prod> </instance> <instance id='gamma' empty-host-ttl='6h'> <dev empty-host-ttl='7d'/> <prod> <region>us-east</region> </prod> </instance> </deployment> """; DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(Map.of(AWS, CloudAccount.from("100000000000")), spec.cloudAccounts()); assertHostTTL(Duration.ofHours(1), spec, "alpha", test, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", staging, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", dev, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", perf, null); assertHostTTL(Duration.ofMinutes(1), spec, "alpha", prod, "us-east"); assertHostTTL(Duration.ofMinutes(2), spec, "alpha", prod, "us-west"); assertEquals(Optional.of(Duration.ofMinutes(1)), spec.requireInstance("alpha").steps().stream() .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-east"))) && step.isTest()) .findFirst().orElseThrow() .hostTTL()); 
        // The us-west prod test declares its own 3m TTL on the <test> tag.
        assertEquals(Optional.of(Duration.ofMinutes(3)),
                     spec.requireInstance("alpha").steps().stream()
                         .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-west"))) && step.isTest())
                         .findFirst().orElseThrow()
                         .hostTTL());
        // 'beta' overrides staging (3d) and perf (4h), and us-west prod (0d);
        // everything else falls back to the deployment-level 1h.
        assertHostTTL(Duration.ofHours(1), spec, "beta", test, null);
        assertHostTTL(Duration.ofDays(3), spec, "beta", staging, null);
        assertHostTTL(Duration.ofHours(1), spec, "beta", dev, null);
        assertHostTTL(Duration.ofHours(4), spec, "beta", perf, null);
        assertHostTTL(Duration.ofHours(1), spec, "beta", prod, "us-east");
        assertHostTTL(Duration.ZERO, spec, "beta", prod, "us-west");
        // 'gamma' sets 6h at the instance level, with dev (7d) as the only override.
        assertHostTTL(Duration.ofHours(6), spec, "gamma", test, null);
        assertHostTTL(Duration.ofHours(6), spec, "gamma", staging, null);
        assertHostTTL(Duration.ofDays(7), spec, "gamma", dev, null);
        assertHostTTL(Duration.ofHours(6), spec, "gamma", perf, null);
        assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-east");
        assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-west");
        // An undeclared instance ('nope') gets the deployment-level TTL everywhere.
        assertHostTTL(Duration.ofHours(1), spec, "nope", test, null);
        assertHostTTL(Duration.ofHours(1), spec, "nope", staging, null);
        assertHostTTL(Duration.ofHours(1), spec, "nope", dev, null);
        assertHostTTL(Duration.ofHours(1), spec, "nope", perf, null);
        assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-east");
        assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-west");
    }

    // Asserts that the spec resolves the given cloud account for the given
    // cloud/instance/zone combination.
    private void assertCloudAccount(String expected, DeploymentSpec spec, CloudName cloud, String instance, Environment environment, String region) {
        assertEquals(CloudAccount.from(expected),
                     spec.cloudAccount(cloud,
                                       InstanceName.from(instance),
                                       com.yahoo.config.provision.zone.ZoneId.from(environment, RegionName.from(region))));
    }

    // Asserts the empty-host TTL the spec resolves for the given instance and zone;
    // a null region stands for the default region.
    private void assertHostTTL(Duration expected, DeploymentSpec spec, String instance, Environment environment, String region) {
        assertEquals(Optional.of(expected),
                     spec.hostTTL(InstanceName.from(instance),
                                  environment,
                                  region == null ?
RegionName.defaultName() : RegionName.from(region))); } private static void assertInvalid(String deploymentSpec, String errorMessagePart) { assertInvalid(deploymentSpec, errorMessagePart, new ManualClock()); } private static void assertInvalid(String deploymentSpec, String errorMessagePart, Clock clock) { if (errorMessagePart.isEmpty()) throw new IllegalArgumentException("Message part must be non-empty"); try { new DeploymentSpecXmlReader(true, clock).read(deploymentSpec); fail("Expected exception"); } catch (IllegalArgumentException e) { assertTrue("\"" + e.getMessage() + "\" contains \"" + errorMessagePart + "\"", e.getMessage().contains(errorMessagePart)); } } private static void assertInvalidEndpoints(String endpointsBody, String error) { assertEquals(error, assertThrows(IllegalArgumentException.class, () -> endpointIds(endpointsBody)) .getMessage()); } private static Set<String> endpointRegions(String endpointId, DeploymentSpec spec) { return spec.requireInstance("default").endpoints().stream() .filter(endpoint -> endpoint.endpointId().equals(endpointId)) .flatMap(endpoint -> endpoint.regions().stream()) .map(RegionName::value) .collect(Collectors.toSet()); } private static List<String> endpointIds(String endpointsBody) { var xml = "<deployment>" + " <instance id='default'>" + " <prod>" + " <region active=\"true\">us-east</region>" + " </prod>" + " <endpoints>" + endpointsBody + " </endpoints>" + " </instance>" + "</deployment>"; return DeploymentSpec.fromXml(xml).requireInstance("default").endpoints().stream() .map(Endpoint::endpointId) .toList(); } }
// NOTE(review): this region was a whitespace-mangled paste; code tokens below are unchanged,
// only line breaks, indentation and comments were added. Indentation inside the XML string
// literals could not be recovered exactly from the mangled text — TODO confirm against the
// original file (it affects only xmlForm() round-trip formatting, which compares against the
// same literal, not XML parsing). The final test method is truncated mid-statement in this
// chunk and is reproduced up to the cut.
class DeploymentSpecTest {

    // Minimal spec: one instance with a single <test/> step; also checks the xmlForm() round-trip.
    @Test
    public void simpleSpec() {
        String specXml = "<deployment version='1.0'>" +
                         "   <instance id='default'>" +
                         "      <test/>" +
                         "   </instance>" +
                         "</deployment>";
        StringReader r = new StringReader(specXml);
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals(specXml, spec.xmlForm());
        assertEquals(1, spec.requireInstance("default").steps().size());
        assertFalse(spec.majorVersion().isPresent());
        assertTrue(spec.requireInstance("default").steps().get(0).concerns(test));
        assertTrue(spec.requireInstance("default").concerns(test, Optional.empty()));
        assertTrue(spec.requireInstance("default").concerns(test, Optional.of(RegionName.from("region1"))));
        assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty()));
        assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty()));
        assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
    }

    // major-version attribute on the root pins the spec to that major.
    @Test
    public void specPinningMajorVersion() {
        String specXml = "<deployment version='1.0' major-version='6'>" +
                         "   <instance id='default'>" +
                         "      <test/>" +
                         "   </instance>" +
                         "</deployment>";
        StringReader r = new StringReader(specXml);
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals(specXml, spec.xmlForm());
        assertEquals(1, spec.requireInstance("default").steps().size());
        assertTrue(spec.majorVersion().isPresent());
        assertEquals(6, (int)spec.majorVersion().get());
    }

    // A lone <staging/> step concerns only the staging environment.
    @Test
    public void stagingSpec() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='default'>" +
                "      <staging/>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals(1, spec.steps().size());
        assertEquals(1, spec.requireInstance("default").steps().size());
        assertTrue(spec.requireInstance("default").steps().get(0).concerns(staging));
        assertFalse(spec.requireInstance("default").concerns(test, Optional.empty()));
        assertTrue(spec.requireInstance("default").concerns(staging, Optional.empty()));
        assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty()));
        assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
    }

    // Prod-only spec: checks declared zones, the active flag, and all upgrade-related defaults.
    @Test
    public void minimalProductionSpec() {
        StringReader r = new StringReader(
                """
                <deployment version='1.0'>
                   <instance id='default'>
                      <prod>
                         <region active='false'>us-east1</region>
                         <region active='true'>us-west1</region>
                      </prod>
                   </instance>
                </deployment>
                """);
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals(1, spec.steps().size());
        assertEquals(2, spec.requireInstance("default").steps().size());
        assertTrue(spec.requireInstance("default").steps().get(0).concerns(prod, Optional.of(RegionName.from("us-east1"))));
        assertFalse(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(0)).active());
        assertTrue(spec.requireInstance("default").steps().get(1).concerns(prod, Optional.of(RegionName.from("us-west1"))));
        assertTrue(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(1)).active());
        assertFalse(spec.requireInstance("default").concerns(test, Optional.empty()));
        assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty()));
        assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-east1"))));
        assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-west1"))));
        assertFalse(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("no-such-region"))));
        assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
        // Defaults when no <upgrade> element is given:
        assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("default").upgradePolicy());
        assertEquals(DeploymentSpec.RevisionTarget.latest, spec.requireInstance("default").revisionTarget());
        assertEquals(DeploymentSpec.RevisionChange.whenFailing, spec.requireInstance("default").revisionChange());
        assertEquals(DeploymentSpec.UpgradeRollout.separate, spec.requireInstance("default").upgradeRollout());
        assertEquals(0, spec.requireInstance("default").minRisk());
        assertEquals(0, spec.requireInstance("default").maxRisk());
        assertEquals(8, spec.requireInstance("default").maxIdleHours());
    }

    // Instance tags are parsed per instance.
    @Test
    public void specWithTags() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='a' tags='tag1 tag2'>" +
                "      <prod>" +
                "         <region active='false'>us-east1</region>" +
                "         <region active='true'>us-west1</region>" +
                "      </prod>" +
                "   </instance>" +
                "   <instance id='b' tags='tag3'>" +
                "      <prod>" +
                "         <region active='false'>us-east1</region>" +
                "         <region active='true'>us-west1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals(Tags.fromString("tag1 tag2"), spec.requireInstance("a").tags());
        assertEquals(Tags.fromString("tag3"), spec.requireInstance("b").tags());
    }

    // All environments plus an in-prod delay; detailed checks delegated to assertCorrectFirstInstance.
    @Test
    public void maximalProductionSpec() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='default'>" +
                "      <test/>" +
                "      <staging/>" +
                "      <prod>" +
                "         <region active='false'>us-east1</region>" +
                "         <delay hours='3' minutes='30'/>" +
                "         <region active='true'>us-west1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertCorrectFirstInstance(spec.requireInstance("default"));
    }

    // <test>region</test> elements inside <prod> become "tests for prod.<region>" steps, in order.
    @Test
    public void productionTests() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='default'>" +
                "      <test/>" +
                "      <staging/>" +
                "      <prod>" +
                "         <region active='false'>us-east-1</region>" +
                "         <region active='true'>us-west-1</region>" +
                "         <delay hours='1' />" +
                "         <test>us-west-1</test>" +
                "         <test>us-east-1</test>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        List<DeploymentSpec.Step> instanceSteps = spec.steps().get(0).steps();
        assertEquals(7, instanceSteps.size());
        assertEquals("test", instanceSteps.get(0).toString());
        assertEquals("staging", instanceSteps.get(1).toString());
        assertEquals("prod.us-east-1", instanceSteps.get(2).toString());
        assertEquals("prod.us-west-1", instanceSteps.get(3).toString());
        assertEquals("delay PT1H", instanceSteps.get(4).toString());
        assertEquals("tests for prod.us-west-1", instanceSteps.get(5).toString());
        assertEquals("tests for prod.us-east-1", instanceSteps.get(6).toString());
    }

    // The same region may not be production-tested twice.
    @Test(expected = IllegalArgumentException.class)
    public void duplicateProductionTest() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='default'>" +
                "      <prod>" +
                "         <region active='true'>us-east1</region>" +
                "         <test>us-east1</test>" +
                "         <test>us-east1</test>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    // A production test must come after the deployment to the region it tests.
    @Test(expected = IllegalArgumentException.class)
    public void productionTestBeforeDeployment() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='default'>" +
                "      <prod>" +
                "         <test>us-east1</test>" +
                "         <region active='true'>us-east1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    // A production test may not run in parallel with the deployment it tests.
    @Test(expected = IllegalArgumentException.class)
    public void productionTestInParallelWithDeployment() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='default'>" +
                "      <prod>" +
                "         <parallel>" +
                "            <region active='true'>us-east1</region>" +
                "            <test>us-east1</test>" +
                "         </parallel>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    // Two instances, each with its own step list.
    @Test
    public void maximalProductionSpecMultipleInstances() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='instance1'>" +
                "      <test/>" +
                "      <staging/>" +
                "      <prod>" +
                "         <region active='false'>us-east1</region>" +
                "         <delay hours='3' minutes='30'/>" +
                "         <region active='true'>us-west1</region>" +
                "      </prod>" +
                "   </instance>" +
                "   <instance id='instance2'>" +
                "      <prod>" +
                "         <region active='true'>us-central1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertCorrectFirstInstance(spec.requireInstance("instance1"));
        DeploymentInstanceSpec instance2 = spec.requireInstance("instance2");
        assertEquals(1, instance2.steps().size());
        assertEquals(1, instance2.zones().size());
        assertTrue(instance2.steps().get(0).concerns(prod, Optional.of(RegionName.from("us-central1"))));
    }

    // A comma-separated id list declares several identical instances.
    @Test
    public void multipleInstancesShortForm() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='instance1, instance2'>" +
                "      <test/>" +
                "      <staging/>" +
                "      <prod>" +
                "         <region active='false'>us-east1</region>" +
                "         <delay hours='3' minutes='30'/>" +
                "         <region active='true'>us-west1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertCorrectFirstInstance(spec.requireInstance("instance1"));
        assertCorrectFirstInstance(spec.requireInstance("instance2"));
    }

    // Shared assertions for the "maximal" instance shape used by several tests above.
    private void assertCorrectFirstInstance(DeploymentInstanceSpec instance) {
        assertEquals(5, instance.steps().size());
        assertEquals(4, instance.zones().size());
        assertTrue(instance.steps().get(0).concerns(test));
        assertTrue(instance.steps().get(1).concerns(staging));
        assertTrue(instance.steps().get(2).concerns(prod, Optional.of(RegionName.from("us-east1"))));
        assertFalse(((DeploymentSpec.DeclaredZone)instance.steps().get(2)).active());
        assertTrue(instance.steps().get(3) instanceof DeploymentSpec.Delay);
        assertEquals(3 * 60 * 60 + 30 * 60, instance.steps().get(3).delay().getSeconds());
        assertTrue(instance.steps().get(4).concerns(prod, Optional.of(RegionName.from("us-west1"))));
        assertTrue(((DeploymentSpec.DeclaredZone)instance.steps().get(4)).active());
        assertTrue(instance.concerns(test, Optional.empty()));
        assertTrue(instance.concerns(test, Optional.of(RegionName.from("region1"))));
        assertTrue(instance.concerns(staging, Optional.empty()));
        assertTrue(instance.concerns(prod, Optional.of(RegionName.from("us-east1"))));
        assertTrue(instance.concerns(prod, Optional.of(RegionName.from("us-west1"))));
        assertFalse(instance.concerns(prod, Optional.of(RegionName.from("no-such-region"))));
        assertFalse(instance.globalServiceId().isPresent());
    }

    // global-service-id on <prod> is exposed on the instance.
    @Test
    public void productionSpecWithGlobalServiceId() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='default'>" +
                "      <prod global-service-id='query'>" +
                "         <region active='true'>us-east-1</region>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals(spec.requireInstance("default").globalServiceId(), Optional.of("query"));
    }

    // global-service-id is only legal on <prod>.
    @Test(expected=IllegalArgumentException.class)
    public void globalServiceIdInTest() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='default'>" +
                "      <test global-service-id='query' />" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    @Test(expected=IllegalArgumentException.class)
    public void globalServiceIdInStaging() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='default'>" +
                "      <staging global-service-id='query' />" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    // global-service-id is picked up regardless of element order.
    @Test
    public void productionSpecWithGlobalServiceIdBeforeStaging() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='default'>" +
                "      <test/>" +
                "      <prod global-service-id='qrs'>" +
                "         <region active='true'>us-west-1</region>" +
                "         <region active='true'>us-central-1</region>" +
                "         <region active='true'>us-east-3</region>" +
                "      </prod>" +
                "      <staging/>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("qrs", spec.requireInstance("default").globalServiceId().get());
    }

    // All <upgrade> revision attributes are parsed, with per-instance overrides.
    @Test
    public void productionSpecWithUpgradeRevisionSettings() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='default'>" +
                "      <upgrade revision-change='when-clear' revision-target='next' min-risk='3' max-risk='12' max-idle-hours='32' />" +
                "   </instance>" +
                "   <instance id='custom'>" +
                "      <upgrade revision-change='always' />" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("next", spec.requireInstance("default").revisionTarget().toString());
        assertEquals("latest", spec.requireInstance("custom").revisionTarget().toString());
        assertEquals("whenClear", spec.requireInstance("default").revisionChange().toString());
        assertEquals("always", spec.requireInstance("custom").revisionChange().toString());
        assertEquals(3, spec.requireInstance("default").minRisk());
        assertEquals(12, spec.requireInstance("default").maxRisk());
        assertEquals(32, spec.requireInstance("default").maxIdleHours());
    }

    // Invalid combinations of the revision attributes are rejected with specific messages.
    @Test
    public void productionSpecsWithIllegalRevisionSettings() {
        assertEquals("revision-change must be 'when-clear' when max-risk is specified, but got: 'always'",
                     assertThrows(IllegalArgumentException.class,
                                  () -> DeploymentSpec.fromXml("<deployment>" +
                                                               "   <instance id='default'>" +
                                                               "      <upgrade revision-change='always' revision-target='next' min-risk='3' max-risk='12' max-idle-hours='32' />" +
                                                               "   </instance>" +
                                                               "</deployment>"))
                             .getMessage());
        assertEquals("revision-target must be 'next' when max-risk is specified, but got: 'latest'",
                     assertThrows(IllegalArgumentException.class,
                                  () -> DeploymentSpec.fromXml("<deployment>" +
                                                               "   <instance id='default'>" +
                                                               "      <upgrade revision-change='when-clear' min-risk='3' max-risk='12' max-idle-hours='32' />" +
                                                               "   </instance>" +
                                                               "</deployment>"))
                             .getMessage());
        assertEquals("maximum risk cannot be less than minimum risk score, but got: '12'",
                     assertThrows(IllegalArgumentException.class,
                                  () -> DeploymentSpec.fromXml("<deployment>" +
                                                               "   <instance id='default'>" +
                                                               "      <upgrade revision-change='when-clear' revision-target='next' min-risk='13' max-risk='12' max-idle-hours='32' />" +
                                                               "   </instance>" +
                                                               "</deployment>"))
                             .getMessage());
        assertEquals("maximum risk cannot be less than minimum risk score, but got: '0'",
                     assertThrows(IllegalArgumentException.class,
                                  () -> DeploymentSpec.fromXml("<deployment>" +
                                                               "   <instance id='default'>" +
                                                               "      <upgrade min-risk='3' />" +
                                                               "   </instance>" +
                                                               "</deployment>"))
                             .getMessage());
    }

    // rollout attribute: leading / simultaneous / default separate.
    @Test
    public void productionSpecWithUpgradeRollout() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='default'>" +
                "      <upgrade rollout='leading' />" +
                "   </instance>" +
                "   <instance id='aggressive'>" +
                "      <upgrade rollout='simultaneous' />" +
                "   </instance>" +
                "   <instance id='custom'/>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("leading", spec.requireInstance("default").upgradeRollout().toString());
        assertEquals("separate", spec.requireInstance("custom").upgradeRollout().toString());
        assertEquals("simultaneous", spec.requireInstance("aggressive").upgradeRollout().toString());
    }

    // policy attribute: canary vs. default.
    @Test
    public void productionSpecWithUpgradePolicy() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='default'>" +
                "      <upgrade policy='canary'/>" +
                "   </instance>" +
                "   <instance id='custom'/>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("canary", spec.requireInstance("default").upgradePolicy().toString());
        assertEquals("defaultPolicy", spec.requireInstance("custom").upgradePolicy().toString());
    }

    // A root-level <upgrade> supplies defaults that instances may override.
    @Test
    public void upgradePolicyDefault() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <upgrade policy='canary' rollout='leading' revision-target='next' revision-change='when-clear' />" +
                "   <instance id='instance1'/>" +
                "   <instance id='instance2'>" +
                "      <upgrade policy='conservative' rollout='separate' revision-target='latest' revision-change='when-failing' />" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("canary", spec.requireInstance("instance1").upgradePolicy().toString());
        assertEquals("conservative", spec.requireInstance("instance2").upgradePolicy().toString());
        assertEquals("next", spec.requireInstance("instance1").revisionTarget().toString());
        assertEquals("latest", spec.requireInstance("instance2").revisionTarget().toString());
        assertEquals("whenClear", spec.requireInstance("instance1").revisionChange().toString());
        assertEquals("whenFailing", spec.requireInstance("instance2").revisionChange().toString());
        assertEquals("leading", spec.requireInstance("instance1").upgradeRollout().toString());
        assertEquals("separate", spec.requireInstance("instance2").upgradeRollout().toString());
    }

    // Total declared delay may not exceed 48 hours; here it is 48h + 1s.
    @Test
    public void maxDelayExceeded() {
        try {
            StringReader r = new StringReader(
                    "<deployment>" +
                    "   <instance id='default'>" +
                    "      <upgrade policy='canary'/>" +
                    "      <prod>" +
                    "         <region active='true'>us-west-1</region>" +
                    "         <delay hours='47'/>" +
                    "         <region active='true'>us-central-1</region>" +
                    "         <delay minutes='59' seconds='61'/>" +
                    "         <region active='true'>us-east-3</region>" +
                    "      </prod>" +
                    "   </instance>" +
                    "</deployment>"
            );
            DeploymentSpec.fromXml(r);
            fail("Expected exception due to exceeding the max total delay");
        }
        catch (IllegalArgumentException e) {
            assertEquals("The total delay specified is PT48H1S but max 48 hours is allowed", e.getMessage());
        }
    }

    // athenz-service may be declared on the instance element alone.
    @Test
    public void onlyAthenzServiceDefinedInInstance() {
        StringReader r = new StringReader(
                "<deployment athenz-domain='domain'>" +
                "   <instance id='default' athenz-service='service' />" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("domain", spec.athenzDomain().get().value());
        assertEquals(1, spec.instances().size());
        DeploymentInstanceSpec instance = spec.instances().get(0);
        assertEquals("default", instance.name().value());
        assertEquals("service", instance.athenzService(prod, RegionName.defaultName()).get().value());
    }

    // <parallel> inside <prod> groups zones deployed simultaneously.
    @Test
    public void productionSpecWithParallelDeployments() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='default'>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "         <parallel>" +
                "            <region active='true'>us-central-1</region>" +
                "            <region active='true'>us-east-3</region>" +
                "         </parallel>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        DeploymentSpec.ParallelSteps parallelSteps = ((DeploymentSpec.ParallelSteps) spec.requireInstance("default").steps().get(1));
        assertEquals(2, parallelSteps.zones().size());
        assertEquals(RegionName.from("us-central-1"), parallelSteps.zones().get(0).region().get());
        assertEquals(RegionName.from("us-east-3"), parallelSteps.zones().get(1).region().get());
    }

    // Root-level test/staging steps coexist with instance-level ones.
    @Test
    public void testAndStagingOutsideAndInsideInstance() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <test/>" +
                "   <staging/>" +
                "   <instance id='instance0'>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "   <instance id='instance1'>" +
                "      <test/>" +
                "      <staging/>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        List<DeploymentSpec.Step> steps = spec.steps();
        assertEquals(4, steps.size());
        assertEquals("test", steps.get(0).toString());
        assertEquals("staging", steps.get(1).toString());
        assertEquals("instance 'instance0'", steps.get(2).toString());
        assertEquals("instance 'instance1'", steps.get(3).toString());
        List<DeploymentSpec.Step> instance0Steps = ((DeploymentInstanceSpec)steps.get(2)).steps();
        assertEquals(1, instance0Steps.size());
        assertEquals("prod.us-west-1", instance0Steps.get(0).toString());
        List<DeploymentSpec.Step> instance1Steps = ((DeploymentInstanceSpec)steps.get(3)).steps();
        assertEquals(3, instance1Steps.size());
        assertEquals("test", instance1Steps.get(0).toString());
        assertEquals("staging", instance1Steps.get(1).toString());
        assertEquals("prod.us-west-1", instance1Steps.get(2).toString());
    }

    // Deeply nested <parallel>/<steps> structures, with athenz-service overrides at region level.
    @Test
    public void nestedParallelAndSteps() {
        StringReader r = new StringReader(
                "<deployment athenz-domain='domain'>" +
                "   <staging />" +
                "   <instance id='instance' athenz-service='in-service'>" +
                "      <prod>" +
                "         <parallel>" +
                "            <region active='true'>us-west-1</region>" +
                "            <steps>" +
                "               <region active='true'>us-east-3</region>" +
                "               <delay hours='2' />" +
                "               <region active='true'>eu-west-1</region>" +
                "               <delay hours='2' />" +
                "            </steps>" +
                "            <steps>" +
                "               <delay hours='3' />" +
                "               <region active='true'>aws-us-east-1a</region>" +
                "               <parallel>" +
                "                  <region active='true' athenz-service='no-service'>ap-northeast-1</region>" +
                "                  <region active='true'>ap-southeast-2</region>" +
                "                  <test>aws-us-east-1a</test>" +
                "               </parallel>" +
                "            </steps>" +
                "            <delay hours='3' minutes='30' />" +
                "         </parallel>" +
                "         <region active='true'>us-north-7</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        List<DeploymentSpec.Step> steps = spec.steps();
        assertEquals(2, steps.size());
        assertEquals("staging", steps.get(0).toString());
        assertEquals("instance 'instance'", steps.get(1).toString());
        assertEquals(Duration.ofHours(4), steps.get(1).delay());
        List<DeploymentSpec.Step> instanceSteps = steps.get(1).steps();
        assertEquals(2, instanceSteps.size());
        assertEquals("4 parallel steps", instanceSteps.get(0).toString());
        assertEquals("prod.us-north-7", instanceSteps.get(1).toString());
        List<DeploymentSpec.Step> parallelSteps = instanceSteps.get(0).steps();
        assertEquals(4, parallelSteps.size());
        assertEquals("prod.us-west-1", parallelSteps.get(0).toString());
        assertEquals("4 steps", parallelSteps.get(1).toString());
        assertEquals("3 steps", parallelSteps.get(2).toString());
        assertEquals("delay PT3H30M", parallelSteps.get(3).toString());
        List<DeploymentSpec.Step> firstSerialSteps = parallelSteps.get(1).steps();
        assertEquals(4, firstSerialSteps.size());
        assertEquals("prod.us-east-3", firstSerialSteps.get(0).toString());
        assertEquals("delay PT2H", firstSerialSteps.get(1).toString());
        assertEquals("prod.eu-west-1", firstSerialSteps.get(2).toString());
        assertEquals("delay PT2H", firstSerialSteps.get(3).toString());
        List<DeploymentSpec.Step> secondSerialSteps = parallelSteps.get(2).steps();
        assertEquals(3, secondSerialSteps.size());
        assertEquals("delay PT3H", secondSerialSteps.get(0).toString());
        assertEquals("prod.aws-us-east-1a", secondSerialSteps.get(1).toString());
        assertEquals("3 parallel steps", secondSerialSteps.get(2).toString());
        List<DeploymentSpec.Step> innerParallelSteps = secondSerialSteps.get(2).steps();
        assertEquals(3, innerParallelSteps.size());
        assertEquals("prod.ap-northeast-1", innerParallelSteps.get(0).toString());
        assertEquals("no-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-northeast-1")).get().value());
        assertEquals("prod.ap-southeast-2", innerParallelSteps.get(1).toString());
        assertEquals("in-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-southeast-2")).get().value());
        assertEquals("tests for prod.aws-us-east-1a", innerParallelSteps.get(2).toString());
    }

    // Whole instances may be grouped in a root-level <parallel>.
    @Test
    public void parallelInstances() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <parallel>" +
                "      <instance id='instance0'>" +
                "         <prod>" +
                "            <region active='true'>us-west-1</region>" +
                "         </prod>" +
                "      </instance>" +
                "      <instance id='instance1'>" +
                "         <prod>" +
                "            <region active='true'>us-east-3</region>" +
                "         </prod>" +
                "      </instance>" +
                "   </parallel>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        List<DeploymentSpec.Step> steps = spec.steps();
        assertEquals(1, steps.size());
        assertEquals("2 parallel steps", steps.get(0).toString());
        List<DeploymentSpec.Step> parallelSteps = steps.get(0).steps();
        assertEquals("instance 'instance0'", parallelSteps.get(0).toString());
        assertEquals("instance 'instance1'", parallelSteps.get(1).toString());
    }

    // A <delay> may separate instance declarations.
    @Test
    public void instancesWithDelay() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='instance0'>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "   <delay hours='12'/>" +
                "   <instance id='instance1'>" +
                "      <prod>" +
                "         <region active='true'>us-east-3</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        List<DeploymentSpec.Step> steps = spec.steps();
        assertEquals(3, steps.size());
        assertEquals("instance 'instance0'", steps.get(0).toString());
        assertEquals("delay PT12H", steps.get(1).toString());
        assertEquals("instance 'instance1'", steps.get(2).toString());
    }

    // The same prod region may not be declared twice, even across <parallel> boundaries.
    @Test
    public void productionSpecWithDuplicateRegions() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='default'>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "         <parallel>" +
                "            <region active='true'>us-west-1</region>" +
                "            <region active='true'>us-central-1</region>" +
                "            <region active='true'>us-east-3</region>" +
                "         </parallel>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        try {
            DeploymentSpec.fromXml(r);
            fail("Expected exception");
        }
        catch (IllegalArgumentException e) {
            assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage());
        }
    }

    // A later instance may not have a stricter (slower) upgrade policy than an earlier one.
    @Test(expected = IllegalArgumentException.class)
    public void deploymentSpecWithIncreasinglyStrictUpgradePolicies() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='instance1'>" +
                "      <upgrade policy='conservative'/>" +
                "   </instance>" +
                "   <instance id='instance2' />" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    @Test(expected = IllegalArgumentException.class)
    public void deploymentSpecWithIncreasinglyStrictUpgradePoliciesInParallel() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='instance0'/>" +
                "   <parallel>" +
                "      <instance id='instance1'>" +
                "         <upgrade policy='conservative'/>" +
                "      </instance>" +
                "      <instance id='instance2'>" +
                "         <upgrade policy='canary'/>" +
                "      </instance>" +
                "   </parallel>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    @Test(expected = IllegalArgumentException.class)
    public void deploymentSpecWithIncreasinglyStrictUpgradePoliciesAfterParallel() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <parallel>" +
                "      <instance id='instance1'>" +
                "         <upgrade policy='conservative'/>" +
                "      </instance>" +
                "      <instance id='instance2'>" +
                "         <upgrade policy='canary'/>" +
                "      </instance>" +
                "   </parallel>" +
                "   <instance id='instance3'/>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    // Differing policies are fine inside a single <parallel> group.
    @Test
    public void deploymentSpecWithDifferentUpgradePoliciesInParallel() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <parallel>" +
                "      <instance id='instance1'>" +
                "         <upgrade policy='conservative'/>" +
                "      </instance>" +
                "      <instance id='instance2' />" +
                "   </parallel>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals(DeploymentSpec.UpgradePolicy.conservative, spec.requireInstance("instance1").upgradePolicy());
        assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("instance2").upgradePolicy());
    }

    // <block-change> must come before environment steps.
    @Test(expected = IllegalArgumentException.class)
    public void deploymentSpecWithIllegallyOrderedDeploymentSpec1() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='default'>" +
                "      <block-change days='sat' hours='10' time-zone='CET'/>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "      <block-change days='mon,tue' hours='15-16'/>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    @Test(expected = IllegalArgumentException.class)
    public void deploymentSpecWithIllegallyOrderedDeploymentSpec2() {
        StringReader r = new StringReader(
                "<deployment>\n" +
                "   <instance id='default'>" +
                "      <block-change days='sat' hours='10' time-zone='CET'/>" +
                "      <test/>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    // block-change parsing: revision flag, time zones, and date-ranged windows; verified via canUpgradeAt.
    @Test
    public void deploymentSpecWithChangeBlocker() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='default'>" +
                "      <block-change revision='false' days='mon,tue' hours='15-16'/>" +
                "      <block-change days='sat' hours='10' time-zone='CET'/>" +
                "      <block-change days='mon-sun' hours='0-23' time-zone='CET' from-date='2022-01-01' to-date='2022-01-15'/>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals(3, spec.requireInstance("default").changeBlocker().size());
        assertTrue(spec.requireInstance("default").changeBlocker().get(0).blocksVersions());
        assertFalse(spec.requireInstance("default").changeBlocker().get(0).blocksRevisions());
        assertEquals(ZoneId.of("UTC"), spec.requireInstance("default").changeBlocker().get(0).window().zone());
        assertTrue(spec.requireInstance("default").changeBlocker().get(1).blocksVersions());
        assertTrue(spec.requireInstance("default").changeBlocker().get(1).blocksRevisions());
        assertEquals(ZoneId.of("CET"), spec.requireInstance("default").changeBlocker().get(1).window().zone());
        assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T14:15:30.00Z")));
        assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T15:15:30.00Z")));
        assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T16:15:30.00Z")));
        assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T17:15:30.00Z")));
        assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T09:15:30.00Z")));
        assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T08:15:30.00Z")));
        assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T10:15:30.00Z")));
        assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2022-01-15T16:00:00.00Z")));
    }

    // A root-level block-change is inherited by every instance, before its own blockers.
    @Test
    public void changeBlockerInheritance() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <block-change revision='false' days='mon,tue' hours='15-16'/>" +
                "   <instance id='instance1'>" +
                "      <block-change days='sat' hours='10' time-zone='CET'/>" +
                "   </instance>" +
                "   <instance id='instance2'>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        String inheritedChangeBlocker = "change blocker revision=false version=true window=time window for hour(s) " +
                                        "[15, 16] on [monday, tuesday] in time zone UTC and date range [any date, any date]";
        assertEquals(2, spec.requireInstance("instance1").changeBlocker().size());
        assertEquals(inheritedChangeBlocker, spec.requireInstance("instance1").changeBlocker().get(0).toString());
        assertEquals("change blocker revision=true version=true window=time window for hour(s) [10] on " +
                     "[saturday] in time zone CET and date range [any date, any date]",
                     spec.requireInstance("instance1").changeBlocker().get(1).toString());
        assertEquals(1, spec.requireInstance("instance2").changeBlocker().size());
        assertEquals(inheritedChangeBlocker, spec.requireInstance("instance2").changeBlocker().get(0).toString());
    }

    // Root-level athenz-domain/-service apply to all instances and zones.
    @Test
    public void athenzConfigIsReadFromDeployment() {
        StringReader r = new StringReader(
                "<deployment athenz-domain='domain' athenz-service='service'>" +
                "   <instance id='instance1'>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("domain", spec.athenzDomain().get().value());
        assertEquals("service", spec.athenzService().get().value());
        assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value());
    }

    // A <prod>-level athenz-service override reaches zones inside <parallel>.
    @Test
    public void athenzConfigPropagatesThroughParallelZones() {
        StringReader r = new StringReader(
                "<deployment athenz-domain='domain' athenz-service='service'>" +
                "   <instance id='instance1'>" +
                "      <prod athenz-service='prod-service'>" +
                "         <region active='true'>us-central-1</region>" +
                "         <parallel>" +
                "            <region active='true'>us-west-1</region>" +
                "            <region active='true'>us-east-3</region>" +
                "         </parallel>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("domain", spec.athenzDomain().get().value());
        assertEquals("service", spec.athenzService().get().value());
        assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-central-1")).get().value());
        assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value());
        assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-east-3")).get().value());
    }

    // Root-level athenz config also reaches instances nested in <parallel>.
    @Test
    public void athenzConfigPropagatesThroughParallelZonesAndInstances() {
        String r = """
                   <deployment athenz-domain='domain' athenz-service='service'>
                      <parallel>
                         <instance id='instance1'>
                            <prod>
                               <parallel>
                                  <region active='true'>us-west-1</region>
                                  <region active='true'>us-east-3</region>
                               </parallel>
                            </prod>
                         </instance>
                         <instance id='instance2'>
                            <prod>
                               <parallel>
                                  <region active='true'>us-west-1</region>
                                  <region active='true'>us-east-3</region>
                               </parallel>
                            </prod>
                         </instance>
                      </parallel>
                   </deployment>
                   """;
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("domain", spec.athenzDomain().get().value());
        assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value());
        assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-east-3")).get().value());
        assertEquals("service", spec.requireInstance("instance2").athenzService(prod, RegionName.from("us-east-3")).get().value());
    }

    // athenz-service declared only on the instance: spec-level service stays empty.
    @Test
    public void athenzConfigIsReadFromInstance() {
        StringReader r = new StringReader(
                "<deployment athenz-domain='domain'>" +
                "   <instance id='default' athenz-service='service'>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("domain", spec.athenzDomain().get().value());
        assertEquals(Optional.empty(), spec.athenzService());
        assertEquals("service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value());
    }

    // Environment-level athenz-service overrides the instance-level one.
    @Test
    public void athenzServiceIsOverriddenFromEnvironment() {
        StringReader r = new StringReader(
                "<deployment athenz-domain='domain' athenz-service='unused-service'>" +
                "   <instance id='default' athenz-service='service'>" +
                "      <test />" +
                "      <staging athenz-service='staging-service' />" +
                "      <prod athenz-service='prod-service'>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        assertEquals("service", spec.requireInstance("default").athenzService(test, RegionName.from("us-east-1")).get().value());
        assertEquals("staging-service", spec.requireInstance("default").athenzService(staging, RegionName.from("us-north-1")).get().value());
        assertEquals("prod-service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value());
    }

    // athenz-domain without any athenz-service is rejected.
    @Test(expected = IllegalArgumentException.class)
    public void missingAthenzServiceFails() {
        StringReader r = new StringReader(
                "<deployment athenz-domain='domain'>" +
                "   <instance id='default'>" +
                "      <prod>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    // athenz-service without a domain is rejected.
    @Test(expected = IllegalArgumentException.class)
    public void athenzServiceWithoutDomainFails() {
        StringReader r = new StringReader(
                "<deployment>" +
                "   <instance id='default'>" +
                "      <prod athenz-service='service'>" +
                "         <region active='true'>us-west-1</region>" +
                "      </prod>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec.fromXml(r);
    }

    // No <notifications> element yields Notifications.none().
    @Test
    public void noNotifications() {
        assertEquals(Notifications.none(),
                     DeploymentSpec.fromXml("<deployment>" +
                                            "   <instance id='default'/>" +
                                            "</deployment>").requireInstance("default").notifications());
    }

    // An empty <notifications/> element also yields Notifications.none().
    @Test
    public void emptyNotifications() {
        DeploymentSpec spec = DeploymentSpec.fromXml("<deployment>" +
                                                     "   <instance id='default'>" +
                                                     "      <notifications/>" +
                                                     "   </instance>" +
                                                     "</deployment>");
        assertEquals(Notifications.none(), spec.requireInstance("default").notifications());
    }

    // Per-element "when" overrides the element-level default; roles and addresses tracked separately.
    @Test
    public void someNotifications() {
        DeploymentSpec spec = DeploymentSpec.fromXml("<deployment>\n" +
                                                     "   <instance id='default'>" +
                                                     "      <notifications when=\"failing\">" +
                                                     "         <email role=\"author\"/>" +
                                                     "         <email address=\"john@dev\" when=\"failing-commit\"/>" +
                                                     "         <email address=\"jane@dev\"/>" +
                                                     "      </notifications>" +
                                                     "   </instance>" +
                                                     "</deployment>");
        assertEquals(ImmutableSet.of(author), spec.requireInstance("default").notifications().emailRolesFor(failing));
        assertEquals(ImmutableSet.of(author), spec.requireInstance("default").notifications().emailRolesFor(failingCommit));
        assertEquals(ImmutableSet.of("john@dev", "jane@dev"), spec.requireInstance("default").notifications().emailAddressesFor(failingCommit));
        assertEquals(ImmutableSet.of("jane@dev"), spec.requireInstance("default").notifications().emailAddressesFor(failing));
    }

    // Each instance keeps its own notification config.
    // NOTE(review): this method is truncated mid-statement at the end of this chunk;
    // the statement continues beyond the chunk boundary.
    @Test
    public void notificationsWithMultipleInstances() {
        StringReader r = new StringReader(
                "<deployment version='1.0'>" +
                "   <instance id='instance1'>" +
                "      <notifications when=\"failing\">" +
                "         <email role=\"author\"/>" +
                "         <email address=\"john@operator\"/>" +
                "      </notifications>" +
                "   </instance>" +
                "   <instance id='instance2'>" +
                "      <notifications when=\"failing-commit\">" +
                "         <email role=\"author\"/>" +
                "         <email address=\"mary@dev\"/>" +
                "      </notifications>" +
                "   </instance>" +
                "</deployment>"
        );
        DeploymentSpec spec = DeploymentSpec.fromXml(r);
        DeploymentInstanceSpec instance1 = spec.requireInstance("instance1");
        assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failing));
        assertEquals(Set.of("john@operator"), instance1.notifications().emailAddressesFor(failing));
        DeploymentInstanceSpec instance2 = spec.requireInstance("instance2");
        assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failingCommit));
        assertEquals(Set.of("mary@dev"),
instance2.notifications().emailAddressesFor(failingCommit)); } @Test public void notificationsDefault() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <notifications>" + " <email role=\"author\" when=\"failing\"/>" + " <email address=\"mary@dev\"/>" + " </notifications>" + " <instance id='instance1'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@operator\" when=\"failing-commit\"/>" + " </notifications>" + " </instance>" + " <instance id='instance2'>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentInstanceSpec instance1 = spec.requireInstance("instance1"); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failing)); assertEquals(Set.of(), instance1.notifications().emailAddressesFor(failing)); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("john@operator"), instance1.notifications().emailAddressesFor(failingCommit)); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failing)); assertEquals(Set.of(), instance2.notifications().emailAddressesFor(failing)); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("mary@dev"), instance2.notifications().emailAddressesFor(failingCommit)); } @Test public void customTesterFlavor() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <test tester-flavor="d-1-4-20" /> <staging /> <prod tester-flavor="d-2-8-50"> <region active="false">us-north-7</region> </prod> </instance> </deployment>"""); assertEquals(Optional.of("d-1-4-20"), spec.requireInstance("default").steps().get(0).zones().get(0).testerFlavor()); assertEquals(Optional.empty(), spec.requireInstance("default").steps().get(1).zones().get(0).testerFlavor()); assertEquals(Optional.of("d-2-8-50"), 
spec.requireInstance("default").steps().get(2).zones().get(0).testerFlavor()); } @Test public void noEndpoints() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'/> </deployment> """); assertEquals(Collections.emptyList(), spec.requireInstance("default").endpoints()); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), defaultId(), ClusterSpec.Id.from("cluster"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), com.yahoo.config.provision.zone.ZoneId.from("test", "us"), ClusterSpec.Id.from("cluster"))); } @Test public void emptyEndpoints() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <endpoints/> </instance> </deployment>"""); assertEquals(List.of(), spec.requireInstance("default").endpoints()); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), defaultId(), ClusterSpec.Id.from("cluster"))); } @Test public void someEndpoints() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region active="true">us-east</region> </prod> <endpoints> <endpoint id="foo" container-id="bar"> <region>us-east</region> </endpoint> <endpoint id="nalle" container-id="frosk" /> <endpoint container-id="quux" /> <endpoint container-id='bax' type='zone' enabled='true' /> <endpoint container-id='froz' type='zone' enabled='false' /> <endpoint container-id='froz' type='private'> <region>us-east</region> <allow with='aws-private-link' arn='barn' /> <allow with='gcp-service-connect' project='nine' /> </endpoint> </endpoints> </instance> </deployment>"""); assertEquals( List.of("foo", "nalle", "default"), spec.requireInstance("default").endpoints().stream().map(Endpoint::endpointId).toList() ); assertEquals( List.of("bar", "frosk", "quux"), spec.requireInstance("default").endpoints().stream().map(Endpoint::containerId).toList() ); assertEquals(List.of(RegionName.from("us-east")), 
spec.requireInstance("default").endpoints().get(0).regions()); var zone = from(prod, RegionName.from("us-east")); var testZone = from(test, RegionName.from("us-east")); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("custom"), zone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), defaultId(), ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), zone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), testZone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.privateEndpoint, spec.zoneEndpoint(InstanceName.from("default"), testZone, ClusterSpec.Id.from("froz"))); assertEquals(new ZoneEndpoint(false, true, List.of(new AllowedUrn(AccessType.awsPrivateLink, "barn"), new AllowedUrn(AccessType.gcpServiceConnect, "nine"))), spec.zoneEndpoint(InstanceName.from("default"), zone, ClusterSpec.Id.from("froz"))); } @Test public void invalidEndpoints() { assertInvalidEndpoints("<endpoint id='FOO' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'FOO'"); assertInvalidEndpoints("<endpoint id='123' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got '123'"); assertInvalidEndpoints("<endpoint id='foo!' 
container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo!'"); assertInvalidEndpoints("<endpoint id='foo.bar' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo.bar'"); assertInvalidEndpoints("<endpoint id='foo--bar' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo--bar'"); assertInvalidEndpoints("<endpoint id='foo-' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo-'"); assertInvalidEndpoints("<endpoint id='foooooooooooo' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foooooooooooo'"); assertInvalidEndpoints("<endpoint id='foo' container-id='qrs'/><endpoint id='foo' container-id='qrs'/>", "Endpoint id 'foo' is specified multiple times"); assertInvalidEndpoints("<endpoint id='default' type='zone' container-id='foo' />", "Instance-level endpoint 'default': cannot declare 'id' with type 'zone' or 'private'"); assertInvalidEndpoints("<endpoint id='default' type='private' container-id='foo' />", "Instance-level endpoint 'default': cannot declare 'id' with type 'zone' or 'private'"); assertInvalidEndpoints("<endpoint type='zone' />", "Missing required attribute 'container-id' in 'endpoint'"); assertInvalidEndpoints("<endpoint type='private' />", "Missing required attribute 'container-id' in 'endpoint'"); assertInvalidEndpoints("<endpoint container-id='foo' type='zone'><allow /></endpoint>", "Instance-level endpoint 'default': only endpoints of type 'private' can specify 'allow' children"); assertInvalidEndpoints("<endpoint 
type='private' container-id='foo' enabled='true' />", "Instance-level endpoint 'default': only endpoints of type 'zone' can specify 'enabled'"); assertInvalidEndpoints("<endpoint type='zone' container-id='qrs'/><endpoint type='zone' container-id='qrs'/>", "Multiple zone endpoints (for all regions) declared for container id 'qrs'"); assertInvalidEndpoints("<endpoint type='private' container-id='qrs'><region>us</region></endpoint>" + "<endpoint type='private' container-id='qrs'><region>us</region></endpoint>", "Multiple private endpoints declared for container id 'qrs' in region 'us'"); assertInvalidEndpoints("<endpoint type='zone' container-id='qrs' />" + "<endpoint type='zone' container-id='qrs'><region>us</region></endpoint>", "Zone endpoint for container id 'qrs' declared both with region 'us', and for all regions."); } @Test public void validEndpoints() { assertEquals(List.of("default"), endpointIds("<endpoint container-id='qrs'/>")); assertEquals(List.of("default"), endpointIds("<endpoint id='' container-id='qrs'/>")); assertEquals(List.of("f"), endpointIds("<endpoint id='f' container-id='qrs'/>")); assertEquals(List.of("foo"), endpointIds("<endpoint id='foo' container-id='qrs'/>")); assertEquals(List.of("foo-bar"), endpointIds("<endpoint id='foo-bar' container-id='qrs'/>")); assertEquals(List.of("foo", "bar"), endpointIds("<endpoint id='foo' container-id='qrs'/><endpoint id='bar' container-id='qrs'/>")); assertEquals(List.of("fooooooooooo"), endpointIds("<endpoint id='fooooooooooo' container-id='qrs'/>")); } @Test public void endpointDefaultRegions() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region>us-east</region> <region>us-west</region> </prod> <endpoints> <endpoint id="foo" container-id="bar"> <region>us-east</region> </endpoint> <endpoint container-id="bar" type='private'> <region>us-east</region> </endpoint> <endpoint id="nalle" container-id="frosk" /> <endpoint container-id="quux" /> <endpoint 
container-id="quux" type='private' /> </endpoints> </instance> </deployment>"""); assertEquals(Set.of("us-east"), endpointRegions("foo", spec)); assertEquals(Set.of("us-east", "us-west"), endpointRegions("nalle", spec)); assertEquals(Set.of("us-east", "us-west"), endpointRegions("default", spec)); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-east"), ClusterSpec.Id.from("bar"))); assertEquals(new ZoneEndpoint(true, false, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-west"), ClusterSpec.Id.from("bar"))); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-east"), ClusterSpec.Id.from("quux"))); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-west"), ClusterSpec.Id.from("quux"))); assertEquals(new HashSet<>() {{ add(null); add(from("prod", "us-east")); }}, spec.requireInstance("default").zoneEndpoints().get(ClusterSpec.Id.from("bar")).keySet()); assertEquals(new HashSet<>() {{ add(null); }}, spec.requireInstance("default").zoneEndpoints().get(ClusterSpec.Id.from("quux")).keySet()); assertEquals(Set.of(ClusterSpec.Id.from("bar"), ClusterSpec.Id.from("quux")), spec.requireInstance("default").zoneEndpoints().keySet()); } @Test public void instanceEndpointDisallowsRegionAttributeOrInstanceTag() { String xmlForm = """ <deployment> <instance id='default'> <prod> <region active="true">us-east</region> <region active="true">us-west</region> </prod> <endpoints> <endpoint container-id="bar" %s> %s </endpoint> </endpoints> </instance> </deployment>"""; assertInvalid(String.format(xmlForm, "id='foo' region='us-east'", "<region>us-east</region>"), "Instance-level endpoint 'foo': invalid 'region' attribute"); assertInvalid(String.format(xmlForm, "id='foo'", "<instance>us-east</instance>"), "Instance-level endpoint 'foo': invalid element 
'instance'"); assertInvalid(String.format(xmlForm, "type='zone'", "<instance>us-east</instance>"), "Instance-level endpoint 'default': invalid element 'instance'"); assertInvalid(String.format(xmlForm, "type='private'", "<instance>us-east</instance>"), "Instance-level endpoint 'default': invalid element 'instance'"); } @Test public void applicationLevelEndpointValidation() { String xmlForm = """ <deployment> <instance id="beta"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <instance id="main"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <endpoints> <endpoint id="foo" container-id="qrs" %s> <instance %s %s>%s</instance> %s </endpoint> </endpoints> </deployment> """; assertInvalid(String.format(xmlForm, "", "weight='1'", "", "main", ""), "'region' attribute must be declared on either <endpoint> or <instance> tag"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "region='us-west-1'", "main", ""), "'region' attribute must be declared on either <endpoint> or <instance> tag"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "", "", "main", ""), "Missing required attribute 'weight' in 'instance"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "", "", ""), "Application-level endpoint 'foo': empty 'instance' element"); assertInvalid(String.format(xmlForm, "region='invalid'", "weight='1'", "", "main", ""), "Application-level endpoint 'foo': targets undeclared region 'invalid' in instance 'main'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='foo'", "", "main", ""), "Application-level endpoint 'foo': invalid weight value 'foo'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "", "main", "<region>us-east-3</region>"), "Application-level endpoint 'foo': invalid element 'region'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", 
"weight='0'", "", "main", ""), "Application-level endpoint 'foo': sum of all weights must be positive, got 0"); assertInvalid(String.format(xmlForm, "type='zone'", "weight='1'", "", "main", ""), "Endpoints at application level cannot be of type 'zone'"); assertInvalid(String.format(xmlForm, "type='private'", "weight='1'", "", "main", ""), "Endpoints at application level cannot be of type 'private'"); } @Test public void cannotTargetDisabledEndpoints() { assertEquals("Instance-level endpoint 'default': all eligible zone endpoints have 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint container-id='id' /> <endpoint type='zone' container-id='id' enabled='false' /> </endpoints> </instance> </deployment> """)) .getMessage()); assertEquals("Instance-level endpoint 'default': targets zone endpoint in 'us' with 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint container-id='id'> <region>us</region> </endpoint> <endpoint type='zone' container-id='id' enabled='false' /> </endpoints> </instance> </deployment> """)) .getMessage()); assertEquals("Application-level endpoint 'default': targets 'us' in 'default', but its zone endpoint has 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint type='zone' container-id='id' enabled='false'> <region>us</region> </endpoint> </endpoints> </instance> <endpoints> <endpoint container-id='id' region='us'> <instance weight='1'>default</instance> </endpoint> </endpoints> </deployment> """)) .getMessage()); } @Test public void 
applicationLevelEndpoint() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id="beta"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <instance id="main"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> <endpoints> <endpoint id="glob" container-id="music"/> </endpoints> </instance> <endpoints> <endpoint id="foo" container-id="movies" region='us-west-1'> <instance weight="2">beta</instance> <instance weight="8">main</instance> </endpoint> <endpoint id="bar" container-id="music" region='us-east-3'> <instance weight="10">main</instance> </endpoint> <endpoint id="baz" container-id="moose"> <instance weight="1" region='us-west-1'>main</instance> <instance weight="2" region='us-east-3'>main</instance> <instance weight="3" region='us-west-1'>beta</instance> </endpoint> </endpoints> </deployment> """); assertEquals(List.of(new Endpoint("foo", "movies", Level.application, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("beta"), 2), new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 8))), new Endpoint("bar", "music", Level.application, List.of(new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 10))), new Endpoint("baz", "moose", Level.application, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 1), new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 2), new Target(RegionName.from("us-west-1"), InstanceName.from("beta"), 3)))), spec.endpoints()); assertEquals(List.of(new Endpoint("glob", "music", Level.instance, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 1), new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 1)))), spec.requireInstance("main").endpoints()); } @Test public void disallowExcessiveUpgradeBlocking() { List<String> specs = List.of( """ <deployment> <block-change/> </deployment>""", """ 
<deployment> <block-change days="mon-wed"/> <block-change days="tue-sun"/> </deployment>""", """ <deployment> <block-change to-date="2023-01-01"/> </deployment>""", """ <deployment> <block-change days="sat-sun"/> <block-change days="mon-fri" hours="0-10" from-date="2023-01-01" to-date="2023-01-15"/> <block-change days="mon-fri" hours="11-23" from-date="2023-01-01" to-date="2023-01-15"/> <block-change from-date="2023-01-14" to-date="2023-01-31"/></deployment>""" ); ManualClock clock = new ManualClock(); clock.setInstant(Instant.parse("2022-01-05T15:00:00.00Z")); for (var spec : specs) { assertInvalid(spec, "Cannot block Vespa upgrades for longer than 21 consecutive days", clock); } } @Test public void testDeployableHash() { assertEquals(DeploymentSpec.fromXml(""" <deployment> <instance id='default' /> </deployment>""").deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' tags=' '> <test /> <staging tester-flavor='2-8-50' /> <block-change days='mon' /> <upgrade policy='canary' revision-target='next' revision-change='when-clear' rollout='simultaneous' /> <prod /> <notifications> <email role='author' /> <email address='dev@duff' /> </notifications> </instance> </deployment>""").deployableHashCode()); assertEquals(DeploymentSpec.fromXml(""" <deployment> <parallel> <instance id='one'> <prod> <region>name</region> </prod> </instance> <instance id='two' /> </parallel> </deployment>""").deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='one'> <prod> <steps> <region>name</region> <delay hours='3' /> <test>name</test> </steps> </prod> </instance> <instance id='two' /></deployment>""").deployableHashCode()); String referenceSpec = """ <deployment> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>"""; assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml("<deployment />").deployableHashCode()); 
assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' /> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' tags='tag1'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='custom'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='custom'> <prod> <region>other</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment major-version='9'> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain' athenz-service='service'> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain'> <instance id='default' athenz-service='service'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain'> <instance id='default'> <prod athenz-service='prod'> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); 
assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod global-service-id='service'> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region>name</region> </prod> <endpoints> <endpoint container-id="quux" /> </endpoints> </instance> </deployment>""").deployableHashCode()); } @Test @Test public void hostTTL() { String r = """ <deployment version='1.0' cloud-account='100000000000' empty-host-ttl='1h'> <instance id='alpha'> <staging /> <prod empty-host-ttl='1m'> <region>us-east</region> <region empty-host-ttl='2m'>us-west</region> <test>us-east</test> <test empty-host-ttl='3m'>us-west</test> </prod> </instance> <instance id='beta'> <staging empty-host-ttl='3d'/> <perf empty-host-ttl='4h'/> <prod> <region>us-east</region> <region empty-host-ttl='0d'>us-west</region> </prod> </instance> <instance id='gamma' empty-host-ttl='6h'> <dev empty-host-ttl='7d'/> <prod> <region>us-east</region> </prod> </instance> </deployment> """; DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(Map.of(AWS, CloudAccount.from("100000000000")), spec.cloudAccounts()); assertHostTTL(Duration.ofHours(1), spec, "alpha", test, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", staging, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", dev, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", perf, null); assertHostTTL(Duration.ofMinutes(1), spec, "alpha", prod, "us-east"); assertHostTTL(Duration.ofMinutes(2), spec, "alpha", prod, "us-west"); assertEquals(Optional.of(Duration.ofMinutes(1)), spec.requireInstance("alpha").steps().stream() .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-east"))) && step.isTest()) .findFirst().orElseThrow() .hostTTL()); 
assertEquals(Optional.of(Duration.ofMinutes(3)), spec.requireInstance("alpha").steps().stream() .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-west"))) && step.isTest()) .findFirst().orElseThrow() .hostTTL()); assertHostTTL(Duration.ofHours(1), spec, "beta", test, null); assertHostTTL(Duration.ofDays(3), spec, "beta", staging, null); assertHostTTL(Duration.ofHours(1), spec, "beta", dev, null); assertHostTTL(Duration.ofHours(4), spec, "beta", perf, null); assertHostTTL(Duration.ofHours(1), spec, "beta", prod, "us-east"); assertHostTTL(Duration.ZERO, spec, "beta", prod, "us-west"); assertHostTTL(Duration.ofHours(6), spec, "gamma", test, null); assertHostTTL(Duration.ofHours(6), spec, "gamma", staging, null); assertHostTTL(Duration.ofDays(7), spec, "gamma", dev, null); assertHostTTL(Duration.ofHours(6), spec, "gamma", perf, null); assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-east"); assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-west"); assertHostTTL(Duration.ofHours(1), spec, "nope", test, null); assertHostTTL(Duration.ofHours(1), spec, "nope", staging, null); assertHostTTL(Duration.ofHours(1), spec, "nope", dev, null); assertHostTTL(Duration.ofHours(1), spec, "nope", perf, null); assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-east"); assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-west"); } private void assertCloudAccount(String expected, DeploymentSpec spec, CloudName cloud, String instance, Environment environment, String region) { assertEquals(CloudAccount.from(expected), spec.cloudAccount(cloud, InstanceName.from(instance), com.yahoo.config.provision.zone.ZoneId.from(environment, RegionName.from(region)))); } private void assertHostTTL(Duration expected, DeploymentSpec spec, String instance, Environment environment, String region) { assertEquals(Optional.of(expected), spec.hostTTL(InstanceName.from(instance), environment, region == null ? 
RegionName.defaultName() : RegionName.from(region))); } private static void assertInvalid(String deploymentSpec, String errorMessagePart) { assertInvalid(deploymentSpec, errorMessagePart, new ManualClock()); } private static void assertInvalid(String deploymentSpec, String errorMessagePart, Clock clock) { if (errorMessagePart.isEmpty()) throw new IllegalArgumentException("Message part must be non-empty"); try { new DeploymentSpecXmlReader(true, clock).read(deploymentSpec); fail("Expected exception"); } catch (IllegalArgumentException e) { assertTrue("\"" + e.getMessage() + "\" contains \"" + errorMessagePart + "\"", e.getMessage().contains(errorMessagePart)); } } private static void assertInvalidEndpoints(String endpointsBody, String error) { assertEquals(error, assertThrows(IllegalArgumentException.class, () -> endpointIds(endpointsBody)) .getMessage()); } private static Set<String> endpointRegions(String endpointId, DeploymentSpec spec) { return spec.requireInstance("default").endpoints().stream() .filter(endpoint -> endpoint.endpointId().equals(endpointId)) .flatMap(endpoint -> endpoint.regions().stream()) .map(RegionName::value) .collect(Collectors.toSet()); } private static List<String> endpointIds(String endpointsBody) { var xml = "<deployment>" + " <instance id='default'>" + " <prod>" + " <region active=\"true\">us-east</region>" + " </prod>" + " <endpoints>" + endpointsBody + " </endpoints>" + " </instance>" + "</deployment>"; return DeploymentSpec.fromXml(xml).requireInstance("default").endpoints().stream() .map(Endpoint::endpointId) .toList(); } }
Both work, but an empty string here means custom accounts for _no_ clouds: the semantics of the string are that each cloud with a custom account is listed, comma-separated, in the format accepted by `CloudAccount.from`, which is documentable. If we want, we can document that `default` is an alias for the empty string.
// Verifies per-cloud resolution of the cloud-account attribute across deployment, instance,
// environment, and region level, as expressed by the expectations below. The deployment-level
// value is a comma-separated list with at most one account per cloud.
public void cloudAccount() {
    String r = """
            <deployment version='1.0' cloud-account='100000000000,gcp:foobar'>
              <instance id='alpha'>
                <prod cloud-account='800000000000'>
                  <region>us-east-1</region>
                </prod>
              </instance>
              <instance id='beta' cloud-account='200000000000'>
                <staging cloud-account='gcp:barbaz'/>
                <perf cloud-account='700000000000'/>
                <prod>
                  <region>us-west-1</region>
                  <region cloud-account='default'>us-west-2</region>
                  <region cloud-account=''>us-west-3</region>
                </prod>
              </instance>
              <instance id='main'>
                <test cloud-account='500000000000'/>
                <dev cloud-account='400000000000'/>
                <prod>
                  <region cloud-account='300000000000'>us-east-1</region>
                  <region>eu-west-1</region>
                </prod>
              </instance>
            </deployment>
            """;
    DeploymentSpec spec = DeploymentSpec.fromXml(r);
    // The deployment-level attribute declares one account for each cloud it lists.
    assertEquals(Map.of(AWS, CloudAccount.from("100000000000"),
                        GCP, CloudAccount.from("gcp:foobar")),
                 spec.cloudAccounts());
    // alpha: the prod-level AWS account wins for AWS; per these expectations it also
    // displaces the deployment-level GCP account for this zone (GCP resolves empty).
    // NOTE(review): confirm that an environment-level account is meant to clear accounts
    // for other clouds rather than merge with inherited ones.
    assertCloudAccount("800000000000", spec, AWS, "alpha", prod, "us-east-1");
    assertCloudAccount("", spec, GCP, "alpha", prod, "us-east-1");
    // beta: instance-level AWS account applies where no closer declaration exists.
    assertCloudAccount("200000000000", spec, AWS, "beta", prod, "us-west-1");
    // beta staging declares only a GCP account; per the expectations that replaces the
    // inherited AWS account for staging entirely.
    assertCloudAccount("", spec, AWS, "beta", staging, "default");
    assertCloudAccount("gcp:barbaz", spec, GCP, "beta", staging, "default");
    // beta perf/dev fall back to the environment- or instance-level AWS account.
    assertCloudAccount("700000000000", spec, AWS, "beta", perf, "default");
    assertCloudAccount("200000000000", spec, AWS, "beta", dev, "default");
    // main: region-level account beats the deployment-level one; undeclared regions inherit it.
    assertCloudAccount("300000000000", spec, AWS, "main", prod, "us-east-1");
    assertCloudAccount("100000000000", spec, AWS, "main", prod, "eu-west-1");
    assertCloudAccount("400000000000", spec, AWS, "main", dev, "default");
    assertCloudAccount("500000000000", spec, AWS, "main", test, "default");
    assertCloudAccount("100000000000", spec, AWS, "main", staging, "default");
    // cloud-account='default' on a region yields the literal "default" account for AWS,
    // while GCP (and both clouds for cloud-account='') resolve to the empty account.
    assertCloudAccount("default", spec, AWS, "beta", prod, "us-west-2");
    assertCloudAccount("", spec, GCP, "beta", prod, "us-west-2");
    assertCloudAccount("", spec, AWS, "beta", prod, "us-west-3");
    assertCloudAccount("", spec, GCP, "beta", prod, "us-west-3");
}
// alpha's prod level sets an AWS account ('800000000000'); per this expectation the
// deployment-level GCP account is not inherited for that zone, so GCP resolves empty.
// NOTE(review): confirm the intended per-cloud override semantics here.
assertCloudAccount("", spec, GCP, "alpha", prod, "us-east-1");
// Verifies per-cloud resolution of the cloud-account attribute across deployment, instance,
// environment, and region level, as expressed by the expectations below. The deployment-level
// value is a comma-separated list with at most one account per cloud.
public void cloudAccount() {
    String r = """
            <deployment version='1.0' cloud-account='100000000000,gcp:foobar'>
              <instance id='alpha'>
                <prod cloud-account='800000000000'>
                  <region>us-east-1</region>
                </prod>
              </instance>
              <instance id='beta' cloud-account='200000000000'>
                <staging cloud-account='gcp:barbaz'/>
                <perf cloud-account='700000000000'/>
                <prod>
                  <region>us-west-1</region>
                  <region cloud-account='default'>us-west-2</region>
                  <region cloud-account=''>us-west-3</region>
                </prod>
              </instance>
              <instance id='main'>
                <test cloud-account='500000000000'/>
                <dev cloud-account='400000000000'/>
                <prod>
                  <region cloud-account='300000000000'>us-east-1</region>
                  <region>eu-west-1</region>
                </prod>
              </instance>
            </deployment>
            """;
    DeploymentSpec spec = DeploymentSpec.fromXml(r);
    // The deployment-level attribute declares one account for each cloud it lists.
    assertEquals(Map.of(AWS, CloudAccount.from("100000000000"),
                        GCP, CloudAccount.from("gcp:foobar")),
                 spec.cloudAccounts());
    // alpha: the prod-level AWS account wins for AWS; per these expectations it also
    // displaces the deployment-level GCP account for this zone (GCP resolves empty).
    // NOTE(review): confirm that an environment-level account is meant to clear accounts
    // for other clouds rather than merge with inherited ones.
    assertCloudAccount("800000000000", spec, AWS, "alpha", prod, "us-east-1");
    assertCloudAccount("", spec, GCP, "alpha", prod, "us-east-1");
    // beta: instance-level AWS account applies where no closer declaration exists.
    assertCloudAccount("200000000000", spec, AWS, "beta", prod, "us-west-1");
    // beta staging declares only a GCP account; per the expectations that replaces the
    // inherited AWS account for staging entirely.
    assertCloudAccount("", spec, AWS, "beta", staging, "default");
    assertCloudAccount("gcp:barbaz", spec, GCP, "beta", staging, "default");
    // beta perf/dev fall back to the environment- or instance-level AWS account.
    assertCloudAccount("700000000000", spec, AWS, "beta", perf, "default");
    assertCloudAccount("200000000000", spec, AWS, "beta", dev, "default");
    // main: region-level account beats the deployment-level one; undeclared regions inherit it.
    assertCloudAccount("300000000000", spec, AWS, "main", prod, "us-east-1");
    assertCloudAccount("100000000000", spec, AWS, "main", prod, "eu-west-1");
    assertCloudAccount("400000000000", spec, AWS, "main", dev, "default");
    assertCloudAccount("500000000000", spec, AWS, "main", test, "default");
    assertCloudAccount("100000000000", spec, AWS, "main", staging, "default");
    // cloud-account='default' on a region yields the literal "default" account for AWS,
    // while GCP (and both clouds for cloud-account='') resolve to the empty account.
    assertCloudAccount("default", spec, AWS, "beta", prod, "us-west-2");
    assertCloudAccount("", spec, GCP, "beta", prod, "us-west-2");
    assertCloudAccount("", spec, AWS, "beta", prod, "us-west-3");
    assertCloudAccount("", spec, GCP, "beta", prod, "us-west-3");
}
class DeploymentSpecTest { @Test public void simpleSpec() { String specXml = "<deployment version='1.0'>" + " <instance id='default'>" + " <test/>" + " </instance>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.requireInstance("default").steps().size()); assertFalse(spec.majorVersion().isPresent()); assertTrue(spec.requireInstance("default").steps().get(0).concerns(test)); assertTrue(spec.requireInstance("default").concerns(test, Optional.empty())); assertTrue(spec.requireInstance("default").concerns(test, Optional.of(RegionName.from("region1")))); assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty())); assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty())); assertFalse(spec.requireInstance("default").globalServiceId().isPresent()); } @Test public void specPinningMajorVersion() { String specXml = "<deployment version='1.0' major-version='6'>" + " <instance id='default'>" + " <test/>" + " </instance>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.requireInstance("default").steps().size()); assertTrue(spec.majorVersion().isPresent()); assertEquals(6, (int)spec.majorVersion().get()); } @Test public void stagingSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <staging/>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(1, spec.steps().size()); assertEquals(1, spec.requireInstance("default").steps().size()); assertTrue(spec.requireInstance("default").steps().get(0).concerns(staging)); assertFalse(spec.requireInstance("default").concerns(test, Optional.empty())); assertTrue(spec.requireInstance("default").concerns(staging, Optional.empty())); 
assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty())); assertFalse(spec.requireInstance("default").globalServiceId().isPresent()); } @Test public void minimalProductionSpec() { StringReader r = new StringReader( """ <deployment version='1.0'> <instance id='default'> <prod> <region active='false'>us-east1</region> <region active='true'>us-west1</region> </prod> </instance> </deployment> """); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(1, spec.steps().size()); assertEquals(2, spec.requireInstance("default").steps().size()); assertTrue(spec.requireInstance("default").steps().get(0).concerns(prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(0)).active()); assertTrue(spec.requireInstance("default").steps().get(1).concerns(prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(1)).active()); assertFalse(spec.requireInstance("default").concerns(test, Optional.empty())); assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty())); assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.requireInstance("default").globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("default").upgradePolicy()); assertEquals(DeploymentSpec.RevisionTarget.latest, spec.requireInstance("default").revisionTarget()); assertEquals(DeploymentSpec.RevisionChange.whenFailing, spec.requireInstance("default").revisionChange()); assertEquals(DeploymentSpec.UpgradeRollout.separate, spec.requireInstance("default").upgradeRollout()); assertEquals(0, 
spec.requireInstance("default").minRisk()); assertEquals(0, spec.requireInstance("default").maxRisk()); assertEquals(8, spec.requireInstance("default").maxIdleHours()); } @Test public void specWithTags() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='a' tags='tag1 tag2'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + " <instance id='b' tags='tag3'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(Tags.fromString("tag1 tag2"), spec.requireInstance("a").tags()); assertEquals(Tags.fromString("tag3"), spec.requireInstance("b").tags()); } @Test public void maximalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertCorrectFirstInstance(spec.requireInstance("default")); } @Test public void productionTests() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " <delay hours='1' />" + " <test>us-west-1</test>" + " <test>us-east-1</test>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> instanceSteps = spec.steps().get(0).steps(); assertEquals(7, instanceSteps.size()); assertEquals("test", instanceSteps.get(0).toString()); assertEquals("staging", instanceSteps.get(1).toString()); 
assertEquals("prod.us-east-1", instanceSteps.get(2).toString()); assertEquals("prod.us-west-1", instanceSteps.get(3).toString()); assertEquals("delay PT1H", instanceSteps.get(4).toString()); assertEquals("tests for prod.us-west-1", instanceSteps.get(5).toString()); assertEquals("tests for prod.us-east-1", instanceSteps.get(6).toString()); } @Test(expected = IllegalArgumentException.class) public void duplicateProductionTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-east1</region>" + " <test>us-east1</test>" + " <test>us-east1</test>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void productionTestBeforeDeployment() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod>" + " <test>us-east1</test>" + " <region active='true'>us-east1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void productionTestInParallelWithDeployment() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod>" + " <parallel>" + " <region active='true'>us-east1</region>" + " <test>us-east1</test>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void maximalProductionSpecMultipleInstances() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + " <instance id='instance2'>" + " <prod>" + " <region active='true'>us-central1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = 
DeploymentSpec.fromXml(r); assertCorrectFirstInstance(spec.requireInstance("instance1")); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(1, instance2.steps().size()); assertEquals(1, instance2.zones().size()); assertTrue(instance2.steps().get(0).concerns(prod, Optional.of(RegionName.from("us-central1")))); } @Test public void multipleInstancesShortForm() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1, instance2'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertCorrectFirstInstance(spec.requireInstance("instance1")); assertCorrectFirstInstance(spec.requireInstance("instance2")); } private void assertCorrectFirstInstance(DeploymentInstanceSpec instance) { assertEquals(5, instance.steps().size()); assertEquals(4, instance.zones().size()); assertTrue(instance.steps().get(0).concerns(test)); assertTrue(instance.steps().get(1).concerns(staging)); assertTrue(instance.steps().get(2).concerns(prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)instance.steps().get(2)).active()); assertTrue(instance.steps().get(3) instanceof DeploymentSpec.Delay); assertEquals(3 * 60 * 60 + 30 * 60, instance.steps().get(3).delay().getSeconds()); assertTrue(instance.steps().get(4).concerns(prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)instance.steps().get(4)).active()); assertTrue(instance.concerns(test, Optional.empty())); assertTrue(instance.concerns(test, Optional.of(RegionName.from("region1")))); assertTrue(instance.concerns(staging, Optional.empty())); assertTrue(instance.concerns(prod, Optional.of(RegionName.from("us-east1")))); assertTrue(instance.concerns(prod, 
Optional.of(RegionName.from("us-west1")))); assertFalse(instance.concerns(prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(instance.globalServiceId().isPresent()); } @Test public void productionSpecWithGlobalServiceId() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod global-service-id='query'>" + " <region active='true'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(spec.requireInstance("default").globalServiceId(), Optional.of("query")); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <test global-service-id='query' />" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInStaging() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <staging global-service-id='query' />" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void productionSpecWithGlobalServiceIdBeforeStaging() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <test/>" + " <prod global-service-id='qrs'>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + " <staging/>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("qrs", spec.requireInstance("default").globalServiceId().get()); } @Test public void productionSpecWithUpgradeRevisionSettings() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade revision-change='when-clear' revision-target='next' min-risk='3' max-risk='12' 
max-idle-hours='32' />" + " </instance>" + " <instance id='custom'>" + " <upgrade revision-change='always' />" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("next", spec.requireInstance("default").revisionTarget().toString()); assertEquals("latest", spec.requireInstance("custom").revisionTarget().toString()); assertEquals("whenClear", spec.requireInstance("default").revisionChange().toString()); assertEquals("always", spec.requireInstance("custom").revisionChange().toString()); assertEquals(3, spec.requireInstance("default").minRisk()); assertEquals(12, spec.requireInstance("default").maxRisk()); assertEquals(32, spec.requireInstance("default").maxIdleHours()); } @Test public void productionSpecsWithIllegalRevisionSettings() { assertEquals("revision-change must be 'when-clear' when max-risk is specified, but got: 'always'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade revision-change='always' revision-target='next' min-risk='3' max-risk='12' max-idle-hours='32' />" + " </instance>" + "</deployment>")) .getMessage()); assertEquals("revision-target must be 'next' when max-risk is specified, but got: 'latest'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade revision-change='when-clear' min-risk='3' max-risk='12' max-idle-hours='32' />" + " </instance>" + "</deployment>")) .getMessage()); assertEquals("maximum risk cannot be less than minimum risk score, but got: '12'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade revision-change='when-clear' revision-target='next' min-risk='13' max-risk='12' max-idle-hours='32' />" + " </instance>" + "</deployment>")) .getMessage()); assertEquals("maximum risk cannot be less than minimum risk score, but got: '0'", 
assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade min-risk='3' />" + " </instance>" + "</deployment>")) .getMessage()); } @Test public void productionSpecWithUpgradeRollout() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade rollout='leading' />" + " </instance>" + " <instance id='aggressive'>" + " <upgrade rollout='simultaneous' />" + " </instance>" + " <instance id='custom'/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("leading", spec.requireInstance("default").upgradeRollout().toString()); assertEquals("separate", spec.requireInstance("custom").upgradeRollout().toString()); assertEquals("simultaneous", spec.requireInstance("aggressive").upgradeRollout().toString()); } @Test public void productionSpecWithUpgradePolicy() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade policy='canary'/>" + " </instance>" + " <instance id='custom'/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.requireInstance("default").upgradePolicy().toString()); assertEquals("defaultPolicy", spec.requireInstance("custom").upgradePolicy().toString()); } @Test public void upgradePolicyDefault() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <upgrade policy='canary' rollout='leading' revision-target='next' revision-change='when-clear' />" + " <instance id='instance1'/>" + " <instance id='instance2'>" + " <upgrade policy='conservative' rollout='separate' revision-target='latest' revision-change='when-failing' />" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.requireInstance("instance1").upgradePolicy().toString()); assertEquals("conservative", spec.requireInstance("instance2").upgradePolicy().toString()); assertEquals("next", 
spec.requireInstance("instance1").revisionTarget().toString()); assertEquals("latest", spec.requireInstance("instance2").revisionTarget().toString()); assertEquals("whenClear", spec.requireInstance("instance1").revisionChange().toString()); assertEquals("whenFailing", spec.requireInstance("instance2").revisionChange().toString()); assertEquals("leading", spec.requireInstance("instance1").upgradeRollout().toString()); assertEquals("separate", spec.requireInstance("instance2").upgradeRollout().toString()); } @Test public void maxDelayExceeded() { try { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <delay hours='47'/>" + " <region active='true'>us-central-1</region>" + " <delay minutes='59' seconds='61'/>" + " <region active='true'>us-east-3</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); fail("Expected exception due to exceeding the max total delay"); } catch (IllegalArgumentException e) { assertEquals("The total delay specified is PT48H1S but max 48 hours is allowed", e.getMessage()); } } @Test public void onlyAthenzServiceDefinedInInstance() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <instance id='default' athenz-service='service' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals(1, spec.instances().size()); DeploymentInstanceSpec instance = spec.instances().get(0); assertEquals("default", instance.name().value()); assertEquals("service", instance.athenzService(prod, RegionName.defaultName()).get().value()); } @Test public void productionSpecWithParallelDeployments() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <parallel>" + " <region active='true'>us-central-1</region>" + " 
<region active='true'>us-east-3</region>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentSpec.ParallelSteps parallelSteps = ((DeploymentSpec.ParallelSteps) spec.requireInstance("default").steps().get(1)); assertEquals(2, parallelSteps.zones().size()); assertEquals(RegionName.from("us-central-1"), parallelSteps.zones().get(0).region().get()); assertEquals(RegionName.from("us-east-3"), parallelSteps.zones().get(1).region().get()); } @Test public void testAndStagingOutsideAndInsideInstance() { StringReader r = new StringReader( "<deployment>" + " <test/>" + " <staging/>" + " <instance id='instance0'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + " <instance id='instance1'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(4, steps.size()); assertEquals("test", steps.get(0).toString()); assertEquals("staging", steps.get(1).toString()); assertEquals("instance 'instance0'", steps.get(2).toString()); assertEquals("instance 'instance1'", steps.get(3).toString()); List<DeploymentSpec.Step> instance0Steps = ((DeploymentInstanceSpec)steps.get(2)).steps(); assertEquals(1, instance0Steps.size()); assertEquals("prod.us-west-1", instance0Steps.get(0).toString()); List<DeploymentSpec.Step> instance1Steps = ((DeploymentInstanceSpec)steps.get(3)).steps(); assertEquals(3, instance1Steps.size()); assertEquals("test", instance1Steps.get(0).toString()); assertEquals("staging", instance1Steps.get(1).toString()); assertEquals("prod.us-west-1", instance1Steps.get(2).toString()); } @Test public void nestedParallelAndSteps() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <staging />" + " <instance id='instance' athenz-service='in-service'>" + 
" <prod>" + " <parallel>" + " <region active='true'>us-west-1</region>" + " <steps>" + " <region active='true'>us-east-3</region>" + " <delay hours='2' />" + " <region active='true'>eu-west-1</region>" + " <delay hours='2' />" + " </steps>" + " <steps>" + " <delay hours='3' />" + " <region active='true'>aws-us-east-1a</region>" + " <parallel>" + " <region active='true' athenz-service='no-service'>ap-northeast-1</region>" + " <region active='true'>ap-southeast-2</region>" + " <test>aws-us-east-1a</test>" + " </parallel>" + " </steps>" + " <delay hours='3' minutes='30' />" + " </parallel>" + " <region active='true'>us-north-7</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(2, steps.size()); assertEquals("staging", steps.get(0).toString()); assertEquals("instance 'instance'", steps.get(1).toString()); assertEquals(Duration.ofHours(4), steps.get(1).delay()); List<DeploymentSpec.Step> instanceSteps = steps.get(1).steps(); assertEquals(2, instanceSteps.size()); assertEquals("4 parallel steps", instanceSteps.get(0).toString()); assertEquals("prod.us-north-7", instanceSteps.get(1).toString()); List<DeploymentSpec.Step> parallelSteps = instanceSteps.get(0).steps(); assertEquals(4, parallelSteps.size()); assertEquals("prod.us-west-1", parallelSteps.get(0).toString()); assertEquals("4 steps", parallelSteps.get(1).toString()); assertEquals("3 steps", parallelSteps.get(2).toString()); assertEquals("delay PT3H30M", parallelSteps.get(3).toString()); List<DeploymentSpec.Step> firstSerialSteps = parallelSteps.get(1).steps(); assertEquals(4, firstSerialSteps.size()); assertEquals("prod.us-east-3", firstSerialSteps.get(0).toString()); assertEquals("delay PT2H", firstSerialSteps.get(1).toString()); assertEquals("prod.eu-west-1", firstSerialSteps.get(2).toString()); assertEquals("delay PT2H", firstSerialSteps.get(3).toString()); List<DeploymentSpec.Step> 
secondSerialSteps = parallelSteps.get(2).steps(); assertEquals(3, secondSerialSteps.size()); assertEquals("delay PT3H", secondSerialSteps.get(0).toString()); assertEquals("prod.aws-us-east-1a", secondSerialSteps.get(1).toString()); assertEquals("3 parallel steps", secondSerialSteps.get(2).toString()); List<DeploymentSpec.Step> innerParallelSteps = secondSerialSteps.get(2).steps(); assertEquals(3, innerParallelSteps.size()); assertEquals("prod.ap-northeast-1", innerParallelSteps.get(0).toString()); assertEquals("no-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-northeast-1")).get().value()); assertEquals("prod.ap-southeast-2", innerParallelSteps.get(1).toString()); assertEquals("in-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-southeast-2")).get().value()); assertEquals("tests for prod.aws-us-east-1a", innerParallelSteps.get(2).toString()); } @Test public void parallelInstances() { StringReader r = new StringReader( "<deployment>" + " <parallel>" + " <instance id='instance0'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + " <instance id='instance1'>" + " <prod>" + " <region active='true'>us-east-3</region>" + " </prod>" + " </instance>" + " </parallel>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(1, steps.size()); assertEquals("2 parallel steps", steps.get(0).toString()); List<DeploymentSpec.Step> parallelSteps = steps.get(0).steps(); assertEquals("instance 'instance0'", parallelSteps.get(0).toString()); assertEquals("instance 'instance1'", parallelSteps.get(1).toString()); } @Test public void instancesWithDelay() { StringReader r = new StringReader( "<deployment>" + " <instance id='instance0'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + " <delay hours='12'/>" + " <instance id='instance1'>" + " <prod>" + " <region 
active='true'>us-east-3</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(3, steps.size()); assertEquals("instance 'instance0'", steps.get(0).toString()); assertEquals("delay PT12H", steps.get(1).toString()); assertEquals("instance 'instance1'", steps.get(2).toString()); } @Test public void productionSpecWithDuplicateRegions() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <parallel>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); try { DeploymentSpec.fromXml(r); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage()); } } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIncreasinglyStrictUpgradePolicies() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2' />" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIncreasinglyStrictUpgradePoliciesInParallel() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance0'/>" + " <parallel>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2'>" + " <upgrade policy='canary'/>" + " </instance>" + " </parallel>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIncreasinglyStrictUpgradePoliciesAfterParallel() { StringReader r = new StringReader( "<deployment 
version='1.0'>" + " <parallel>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2'>" + " <upgrade policy='canary'/>" + " </instance>" + " </parallel>" + " <instance id='instance3'/>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void deploymentSpecWithDifferentUpgradePoliciesInParallel() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <parallel>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2' />" + " </parallel>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(DeploymentSpec.UpgradePolicy.conservative, spec.requireInstance("instance1").upgradePolicy()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("instance2").upgradePolicy()); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIllegallyOrderedDeploymentSpec1() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " <block-change days='mon,tue' hours='15-16'/>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIllegallyOrderedDeploymentSpec2() { StringReader r = new StringReader( "<deployment>\n" + " <instance id='default'>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " <test/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void deploymentSpecWithChangeBlocker() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <block-change revision='false' days='mon,tue' hours='15-16'/>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " 
<block-change days='mon-sun' hours='0-23' time-zone='CET' from-date='2022-01-01' to-date='2022-01-15'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(3, spec.requireInstance("default").changeBlocker().size()); assertTrue(spec.requireInstance("default").changeBlocker().get(0).blocksVersions()); assertFalse(spec.requireInstance("default").changeBlocker().get(0).blocksRevisions()); assertEquals(ZoneId.of("UTC"), spec.requireInstance("default").changeBlocker().get(0).window().zone()); assertTrue(spec.requireInstance("default").changeBlocker().get(1).blocksVersions()); assertTrue(spec.requireInstance("default").changeBlocker().get(1).blocksRevisions()); assertEquals(ZoneId.of("CET"), spec.requireInstance("default").changeBlocker().get(1).window().zone()); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T14:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T15:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T16:15:30.00Z"))); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T17:15:30.00Z"))); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T09:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T08:15:30.00Z"))); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T10:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2022-01-15T16:00:00.00Z"))); } @Test public void changeBlockerInheritance() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <block-change revision='false' days='mon,tue' hours='15-16'/>" + " <instance id='instance1'>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " </instance>" + " <instance 
id='instance2'>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); String inheritedChangeBlocker = "change blocker revision=false version=true window=time window for hour(s) " + "[15, 16] on [monday, tuesday] in time zone UTC and date range [any date, any date]"; assertEquals(2, spec.requireInstance("instance1").changeBlocker().size()); assertEquals(inheritedChangeBlocker, spec.requireInstance("instance1").changeBlocker().get(0).toString()); assertEquals("change blocker revision=true version=true window=time window for hour(s) [10] on " + "[saturday] in time zone CET and date range [any date, any date]", spec.requireInstance("instance1").changeBlocker().get(1).toString()); assertEquals(1, spec.requireInstance("instance2").changeBlocker().size()); assertEquals(inheritedChangeBlocker, spec.requireInstance("instance2").changeBlocker().get(0).toString()); } @Test public void athenzConfigIsReadFromDeployment() { StringReader r = new StringReader( "<deployment athenz-domain='domain' athenz-service='service'>" + " <instance id='instance1'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals("service", spec.athenzService().get().value()); assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value()); } @Test public void athenzConfigPropagatesThroughParallelZones() { StringReader r = new StringReader( "<deployment athenz-domain='domain' athenz-service='service'>" + " <instance id='instance1'>" + " <prod athenz-service='prod-service'>" + " <region active='true'>us-central-1</region>" + " <parallel>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-east-3</region>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = 
DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals("service", spec.athenzService().get().value()); assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-central-1")).get().value()); assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value()); assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-east-3")).get().value()); } @Test public void athenzConfigPropagatesThroughParallelZonesAndInstances() { String r = """ <deployment athenz-domain='domain' athenz-service='service'> <parallel> <instance id='instance1'> <prod> <parallel> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </parallel> </prod> </instance> <instance id='instance2'> <prod> <parallel> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </parallel> </prod> </instance> </parallel> </deployment> """; DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value()); assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-east-3")).get().value()); assertEquals("service", spec.requireInstance("instance2").athenzService(prod, RegionName.from("us-east-3")).get().value()); } @Test public void athenzConfigIsReadFromInstance() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <instance id='default' athenz-service='service'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals(Optional.empty(), spec.athenzService()); 
assertEquals("service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value()); } @Test public void athenzServiceIsOverriddenFromEnvironment() { StringReader r = new StringReader( "<deployment athenz-domain='domain' athenz-service='unused-service'>" + " <instance id='default' athenz-service='service'>" + " <test />" + " <staging athenz-service='staging-service' />" + " <prod athenz-service='prod-service'>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("service", spec.requireInstance("default").athenzService(test, RegionName.from("us-east-1")).get().value()); assertEquals("staging-service", spec.requireInstance("default").athenzService(staging, RegionName.from("us-north-1")).get().value()); assertEquals("prod-service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value()); } @Test(expected = IllegalArgumentException.class) public void missingAthenzServiceFails() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void athenzServiceWithoutDomainFails() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <prod athenz-service='service'>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void noNotifications() { assertEquals(Notifications.none(), DeploymentSpec.fromXml("<deployment>" + " <instance id='default'/>" + "</deployment>").requireInstance("default").notifications()); } @Test public void emptyNotifications() { DeploymentSpec spec = DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " 
<notifications/>" + " </instance>" + "</deployment>"); assertEquals(Notifications.none(), spec.requireInstance("default").notifications()); } @Test public void someNotifications() { DeploymentSpec spec = DeploymentSpec.fromXml("<deployment>\n" + " <instance id='default'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@dev\" when=\"failing-commit\"/>" + " <email address=\"jane@dev\"/>" + " </notifications>" + " </instance>" + "</deployment>"); assertEquals(ImmutableSet.of(author), spec.requireInstance("default").notifications().emailRolesFor(failing)); assertEquals(ImmutableSet.of(author), spec.requireInstance("default").notifications().emailRolesFor(failingCommit)); assertEquals(ImmutableSet.of("john@dev", "jane@dev"), spec.requireInstance("default").notifications().emailAddressesFor(failingCommit)); assertEquals(ImmutableSet.of("jane@dev"), spec.requireInstance("default").notifications().emailAddressesFor(failing)); } @Test public void notificationsWithMultipleInstances() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@operator\"/>" + " </notifications>" + " </instance>" + " <instance id='instance2'>" + " <notifications when=\"failing-commit\">" + " <email role=\"author\"/>" + " <email address=\"mary@dev\"/>" + " </notifications>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentInstanceSpec instance1 = spec.requireInstance("instance1"); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failing)); assertEquals(Set.of("john@operator"), instance1.notifications().emailAddressesFor(failing)); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("mary@dev"), 
instance2.notifications().emailAddressesFor(failingCommit)); } @Test public void notificationsDefault() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <notifications>" + " <email role=\"author\" when=\"failing\"/>" + " <email address=\"mary@dev\"/>" + " </notifications>" + " <instance id='instance1'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@operator\" when=\"failing-commit\"/>" + " </notifications>" + " </instance>" + " <instance id='instance2'>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentInstanceSpec instance1 = spec.requireInstance("instance1"); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failing)); assertEquals(Set.of(), instance1.notifications().emailAddressesFor(failing)); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("john@operator"), instance1.notifications().emailAddressesFor(failingCommit)); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failing)); assertEquals(Set.of(), instance2.notifications().emailAddressesFor(failing)); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("mary@dev"), instance2.notifications().emailAddressesFor(failingCommit)); } @Test public void customTesterFlavor() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <test tester-flavor="d-1-4-20" /> <staging /> <prod tester-flavor="d-2-8-50"> <region active="false">us-north-7</region> </prod> </instance> </deployment>"""); assertEquals(Optional.of("d-1-4-20"), spec.requireInstance("default").steps().get(0).zones().get(0).testerFlavor()); assertEquals(Optional.empty(), spec.requireInstance("default").steps().get(1).zones().get(0).testerFlavor()); assertEquals(Optional.of("d-2-8-50"), 
spec.requireInstance("default").steps().get(2).zones().get(0).testerFlavor()); } @Test public void noEndpoints() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'/> </deployment> """); assertEquals(Collections.emptyList(), spec.requireInstance("default").endpoints()); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), defaultId(), ClusterSpec.Id.from("cluster"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), com.yahoo.config.provision.zone.ZoneId.from("test", "us"), ClusterSpec.Id.from("cluster"))); } @Test public void emptyEndpoints() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <endpoints/> </instance> </deployment>"""); assertEquals(List.of(), spec.requireInstance("default").endpoints()); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), defaultId(), ClusterSpec.Id.from("cluster"))); } @Test public void someEndpoints() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region active="true">us-east</region> </prod> <endpoints> <endpoint id="foo" container-id="bar"> <region>us-east</region> </endpoint> <endpoint id="nalle" container-id="frosk" /> <endpoint container-id="quux" /> <endpoint container-id='bax' type='zone' enabled='true' /> <endpoint container-id='froz' type='zone' enabled='false' /> <endpoint container-id='froz' type='private'> <region>us-east</region> <allow with='aws-private-link' arn='barn' /> <allow with='gcp-service-connect' project='nine' /> </endpoint> </endpoints> </instance> </deployment>"""); assertEquals( List.of("foo", "nalle", "default"), spec.requireInstance("default").endpoints().stream().map(Endpoint::endpointId).toList() ); assertEquals( List.of("bar", "frosk", "quux"), spec.requireInstance("default").endpoints().stream().map(Endpoint::containerId).toList() ); assertEquals(List.of(RegionName.from("us-east")), 
spec.requireInstance("default").endpoints().get(0).regions()); var zone = from(prod, RegionName.from("us-east")); var testZone = from(test, RegionName.from("us-east")); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("custom"), zone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), defaultId(), ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), zone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), testZone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.privateEndpoint, spec.zoneEndpoint(InstanceName.from("default"), testZone, ClusterSpec.Id.from("froz"))); assertEquals(new ZoneEndpoint(false, true, List.of(new AllowedUrn(AccessType.awsPrivateLink, "barn"), new AllowedUrn(AccessType.gcpServiceConnect, "nine"))), spec.zoneEndpoint(InstanceName.from("default"), zone, ClusterSpec.Id.from("froz"))); } @Test public void invalidEndpoints() { assertInvalidEndpoints("<endpoint id='FOO' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'FOO'"); assertInvalidEndpoints("<endpoint id='123' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got '123'"); assertInvalidEndpoints("<endpoint id='foo!' 
container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo!'"); assertInvalidEndpoints("<endpoint id='foo.bar' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo.bar'"); assertInvalidEndpoints("<endpoint id='foo--bar' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo--bar'"); assertInvalidEndpoints("<endpoint id='foo-' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo-'"); assertInvalidEndpoints("<endpoint id='foooooooooooo' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foooooooooooo'"); assertInvalidEndpoints("<endpoint id='foo' container-id='qrs'/><endpoint id='foo' container-id='qrs'/>", "Endpoint id 'foo' is specified multiple times"); assertInvalidEndpoints("<endpoint id='default' type='zone' container-id='foo' />", "Instance-level endpoint 'default': cannot declare 'id' with type 'zone' or 'private'"); assertInvalidEndpoints("<endpoint id='default' type='private' container-id='foo' />", "Instance-level endpoint 'default': cannot declare 'id' with type 'zone' or 'private'"); assertInvalidEndpoints("<endpoint type='zone' />", "Missing required attribute 'container-id' in 'endpoint'"); assertInvalidEndpoints("<endpoint type='private' />", "Missing required attribute 'container-id' in 'endpoint'"); assertInvalidEndpoints("<endpoint container-id='foo' type='zone'><allow /></endpoint>", "Instance-level endpoint 'default': only endpoints of type 'private' can specify 'allow' children"); assertInvalidEndpoints("<endpoint 
type='private' container-id='foo' enabled='true' />", "Instance-level endpoint 'default': only endpoints of type 'zone' can specify 'enabled'"); assertInvalidEndpoints("<endpoint type='zone' container-id='qrs'/><endpoint type='zone' container-id='qrs'/>", "Multiple zone endpoints (for all regions) declared for container id 'qrs'"); assertInvalidEndpoints("<endpoint type='private' container-id='qrs'><region>us</region></endpoint>" + "<endpoint type='private' container-id='qrs'><region>us</region></endpoint>", "Multiple private endpoints declared for container id 'qrs' in region 'us'"); assertInvalidEndpoints("<endpoint type='zone' container-id='qrs' />" + "<endpoint type='zone' container-id='qrs'><region>us</region></endpoint>", "Zone endpoint for container id 'qrs' declared both with region 'us', and for all regions."); } @Test public void validEndpoints() { assertEquals(List.of("default"), endpointIds("<endpoint container-id='qrs'/>")); assertEquals(List.of("default"), endpointIds("<endpoint id='' container-id='qrs'/>")); assertEquals(List.of("f"), endpointIds("<endpoint id='f' container-id='qrs'/>")); assertEquals(List.of("foo"), endpointIds("<endpoint id='foo' container-id='qrs'/>")); assertEquals(List.of("foo-bar"), endpointIds("<endpoint id='foo-bar' container-id='qrs'/>")); assertEquals(List.of("foo", "bar"), endpointIds("<endpoint id='foo' container-id='qrs'/><endpoint id='bar' container-id='qrs'/>")); assertEquals(List.of("fooooooooooo"), endpointIds("<endpoint id='fooooooooooo' container-id='qrs'/>")); } @Test public void endpointDefaultRegions() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region>us-east</region> <region>us-west</region> </prod> <endpoints> <endpoint id="foo" container-id="bar"> <region>us-east</region> </endpoint> <endpoint container-id="bar" type='private'> <region>us-east</region> </endpoint> <endpoint id="nalle" container-id="frosk" /> <endpoint container-id="quux" /> <endpoint 
container-id="quux" type='private' /> </endpoints> </instance> </deployment>"""); assertEquals(Set.of("us-east"), endpointRegions("foo", spec)); assertEquals(Set.of("us-east", "us-west"), endpointRegions("nalle", spec)); assertEquals(Set.of("us-east", "us-west"), endpointRegions("default", spec)); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-east"), ClusterSpec.Id.from("bar"))); assertEquals(new ZoneEndpoint(true, false, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-west"), ClusterSpec.Id.from("bar"))); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-east"), ClusterSpec.Id.from("quux"))); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-west"), ClusterSpec.Id.from("quux"))); assertEquals(new HashSet<>() {{ add(null); add(from("prod", "us-east")); }}, spec.requireInstance("default").zoneEndpoints().get(ClusterSpec.Id.from("bar")).keySet()); assertEquals(new HashSet<>() {{ add(null); }}, spec.requireInstance("default").zoneEndpoints().get(ClusterSpec.Id.from("quux")).keySet()); assertEquals(Set.of(ClusterSpec.Id.from("bar"), ClusterSpec.Id.from("quux")), spec.requireInstance("default").zoneEndpoints().keySet()); } @Test public void instanceEndpointDisallowsRegionAttributeOrInstanceTag() { String xmlForm = """ <deployment> <instance id='default'> <prod> <region active="true">us-east</region> <region active="true">us-west</region> </prod> <endpoints> <endpoint container-id="bar" %s> %s </endpoint> </endpoints> </instance> </deployment>"""; assertInvalid(String.format(xmlForm, "id='foo' region='us-east'", "<region>us-east</region>"), "Instance-level endpoint 'foo': invalid 'region' attribute"); assertInvalid(String.format(xmlForm, "id='foo'", "<instance>us-east</instance>"), "Instance-level endpoint 'foo': invalid element 
'instance'"); assertInvalid(String.format(xmlForm, "type='zone'", "<instance>us-east</instance>"), "Instance-level endpoint 'default': invalid element 'instance'"); assertInvalid(String.format(xmlForm, "type='private'", "<instance>us-east</instance>"), "Instance-level endpoint 'default': invalid element 'instance'"); } @Test public void applicationLevelEndpointValidation() { String xmlForm = """ <deployment> <instance id="beta"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <instance id="main"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <endpoints> <endpoint id="foo" container-id="qrs" %s> <instance %s %s>%s</instance> %s </endpoint> </endpoints> </deployment> """; assertInvalid(String.format(xmlForm, "", "weight='1'", "", "main", ""), "'region' attribute must be declared on either <endpoint> or <instance> tag"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "region='us-west-1'", "main", ""), "'region' attribute must be declared on either <endpoint> or <instance> tag"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "", "", "main", ""), "Missing required attribute 'weight' in 'instance"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "", "", ""), "Application-level endpoint 'foo': empty 'instance' element"); assertInvalid(String.format(xmlForm, "region='invalid'", "weight='1'", "", "main", ""), "Application-level endpoint 'foo': targets undeclared region 'invalid' in instance 'main'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='foo'", "", "main", ""), "Application-level endpoint 'foo': invalid weight value 'foo'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "", "main", "<region>us-east-3</region>"), "Application-level endpoint 'foo': invalid element 'region'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", 
"weight='0'", "", "main", ""), "Application-level endpoint 'foo': sum of all weights must be positive, got 0"); assertInvalid(String.format(xmlForm, "type='zone'", "weight='1'", "", "main", ""), "Endpoints at application level cannot be of type 'zone'"); assertInvalid(String.format(xmlForm, "type='private'", "weight='1'", "", "main", ""), "Endpoints at application level cannot be of type 'private'"); } @Test public void cannotTargetDisabledEndpoints() { assertEquals("Instance-level endpoint 'default': all eligible zone endpoints have 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint container-id='id' /> <endpoint type='zone' container-id='id' enabled='false' /> </endpoints> </instance> </deployment> """)) .getMessage()); assertEquals("Instance-level endpoint 'default': targets zone endpoint in 'us' with 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint container-id='id'> <region>us</region> </endpoint> <endpoint type='zone' container-id='id' enabled='false' /> </endpoints> </instance> </deployment> """)) .getMessage()); assertEquals("Application-level endpoint 'default': targets 'us' in 'default', but its zone endpoint has 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint type='zone' container-id='id' enabled='false'> <region>us</region> </endpoint> </endpoints> </instance> <endpoints> <endpoint container-id='id' region='us'> <instance weight='1'>default</instance> </endpoint> </endpoints> </deployment> """)) .getMessage()); } @Test public void 
applicationLevelEndpoint() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id="beta"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <instance id="main"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> <endpoints> <endpoint id="glob" container-id="music"/> </endpoints> </instance> <endpoints> <endpoint id="foo" container-id="movies" region='us-west-1'> <instance weight="2">beta</instance> <instance weight="8">main</instance> </endpoint> <endpoint id="bar" container-id="music" region='us-east-3'> <instance weight="10">main</instance> </endpoint> <endpoint id="baz" container-id="moose"> <instance weight="1" region='us-west-1'>main</instance> <instance weight="2" region='us-east-3'>main</instance> <instance weight="3" region='us-west-1'>beta</instance> </endpoint> </endpoints> </deployment> """); assertEquals(List.of(new Endpoint("foo", "movies", Level.application, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("beta"), 2), new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 8))), new Endpoint("bar", "music", Level.application, List.of(new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 10))), new Endpoint("baz", "moose", Level.application, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 1), new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 2), new Target(RegionName.from("us-west-1"), InstanceName.from("beta"), 3)))), spec.endpoints()); assertEquals(List.of(new Endpoint("glob", "music", Level.instance, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 1), new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 1)))), spec.requireInstance("main").endpoints()); } @Test public void disallowExcessiveUpgradeBlocking() { List<String> specs = List.of( """ <deployment> <block-change/> </deployment>""", """ 
<deployment> <block-change days="mon-wed"/> <block-change days="tue-sun"/> </deployment>""", """ <deployment> <block-change to-date="2023-01-01"/> </deployment>""", """ <deployment> <block-change days="sat-sun"/> <block-change days="mon-fri" hours="0-10" from-date="2023-01-01" to-date="2023-01-15"/> <block-change days="mon-fri" hours="11-23" from-date="2023-01-01" to-date="2023-01-15"/> <block-change from-date="2023-01-14" to-date="2023-01-31"/></deployment>""" ); ManualClock clock = new ManualClock(); clock.setInstant(Instant.parse("2022-01-05T15:00:00.00Z")); for (var spec : specs) { assertInvalid(spec, "Cannot block Vespa upgrades for longer than 21 consecutive days", clock); } } @Test public void testDeployableHash() { assertEquals(DeploymentSpec.fromXml(""" <deployment> <instance id='default' /> </deployment>""").deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' tags=' '> <test /> <staging tester-flavor='2-8-50' /> <block-change days='mon' /> <upgrade policy='canary' revision-target='next' revision-change='when-clear' rollout='simultaneous' /> <prod /> <notifications> <email role='author' /> <email address='dev@duff' /> </notifications> </instance> </deployment>""").deployableHashCode()); assertEquals(DeploymentSpec.fromXml(""" <deployment> <parallel> <instance id='one'> <prod> <region>name</region> </prod> </instance> <instance id='two' /> </parallel> </deployment>""").deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='one'> <prod> <steps> <region>name</region> <delay hours='3' /> <test>name</test> </steps> </prod> </instance> <instance id='two' /></deployment>""").deployableHashCode()); String referenceSpec = """ <deployment> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>"""; assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml("<deployment />").deployableHashCode()); 
assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' /> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' tags='tag1'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='custom'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='custom'> <prod> <region>other</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment major-version='9'> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain' athenz-service='service'> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain'> <instance id='default' athenz-service='service'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain'> <instance id='default'> <prod athenz-service='prod'> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); 
assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod global-service-id='service'> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region>name</region> </prod> <endpoints> <endpoint container-id="quux" /> </endpoints> </instance> </deployment>""").deployableHashCode()); } @Test @Test public void hostTTL() { String r = """ <deployment version='1.0' cloud-account='100000000000' empty-host-ttl='1h'> <instance id='alpha'> <staging /> <prod empty-host-ttl='1m'> <region>us-east</region> <region empty-host-ttl='2m'>us-west</region> <test>us-east</test> <test empty-host-ttl='3m'>us-west</test> </prod> </instance> <instance id='beta'> <staging empty-host-ttl='3d'/> <perf empty-host-ttl='4h'/> <prod> <region>us-east</region> <region empty-host-ttl='0d'>us-west</region> </prod> </instance> <instance id='gamma' empty-host-ttl='6h'> <dev empty-host-ttl='7d'/> <prod> <region>us-east</region> </prod> </instance> </deployment> """; DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(Map.of(AWS, CloudAccount.from("100000000000")), spec.cloudAccounts()); assertHostTTL(Duration.ofHours(1), spec, "alpha", test, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", staging, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", dev, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", perf, null); assertHostTTL(Duration.ofMinutes(1), spec, "alpha", prod, "us-east"); assertHostTTL(Duration.ofMinutes(2), spec, "alpha", prod, "us-west"); assertEquals(Optional.of(Duration.ofMinutes(1)), spec.requireInstance("alpha").steps().stream() .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-east"))) && step.isTest()) .findFirst().orElseThrow() .hostTTL()); 
assertEquals(Optional.of(Duration.ofMinutes(3)), spec.requireInstance("alpha").steps().stream() .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-west"))) && step.isTest()) .findFirst().orElseThrow() .hostTTL()); assertHostTTL(Duration.ofHours(1), spec, "beta", test, null); assertHostTTL(Duration.ofDays(3), spec, "beta", staging, null); assertHostTTL(Duration.ofHours(1), spec, "beta", dev, null); assertHostTTL(Duration.ofHours(4), spec, "beta", perf, null); assertHostTTL(Duration.ofHours(1), spec, "beta", prod, "us-east"); assertHostTTL(Duration.ZERO, spec, "beta", prod, "us-west"); assertHostTTL(Duration.ofHours(6), spec, "gamma", test, null); assertHostTTL(Duration.ofHours(6), spec, "gamma", staging, null); assertHostTTL(Duration.ofDays(7), spec, "gamma", dev, null); assertHostTTL(Duration.ofHours(6), spec, "gamma", perf, null); assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-east"); assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-west"); assertHostTTL(Duration.ofHours(1), spec, "nope", test, null); assertHostTTL(Duration.ofHours(1), spec, "nope", staging, null); assertHostTTL(Duration.ofHours(1), spec, "nope", dev, null); assertHostTTL(Duration.ofHours(1), spec, "nope", perf, null); assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-east"); assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-west"); } private void assertCloudAccount(String expected, DeploymentSpec spec, CloudName cloud, String instance, Environment environment, String region) { assertEquals(CloudAccount.from(expected), spec.cloudAccount(cloud, InstanceName.from(instance), com.yahoo.config.provision.zone.ZoneId.from(environment, RegionName.from(region)))); } private void assertHostTTL(Duration expected, DeploymentSpec spec, String instance, Environment environment, String region) { assertEquals(Optional.of(expected), spec.hostTTL(InstanceName.from(instance), environment, region == null ? 
RegionName.defaultName() : RegionName.from(region))); } private static void assertInvalid(String deploymentSpec, String errorMessagePart) { assertInvalid(deploymentSpec, errorMessagePart, new ManualClock()); } private static void assertInvalid(String deploymentSpec, String errorMessagePart, Clock clock) { if (errorMessagePart.isEmpty()) throw new IllegalArgumentException("Message part must be non-empty"); try { new DeploymentSpecXmlReader(true, clock).read(deploymentSpec); fail("Expected exception"); } catch (IllegalArgumentException e) { assertTrue("\"" + e.getMessage() + "\" contains \"" + errorMessagePart + "\"", e.getMessage().contains(errorMessagePart)); } } private static void assertInvalidEndpoints(String endpointsBody, String error) { assertEquals(error, assertThrows(IllegalArgumentException.class, () -> endpointIds(endpointsBody)) .getMessage()); } private static Set<String> endpointRegions(String endpointId, DeploymentSpec spec) { return spec.requireInstance("default").endpoints().stream() .filter(endpoint -> endpoint.endpointId().equals(endpointId)) .flatMap(endpoint -> endpoint.regions().stream()) .map(RegionName::value) .collect(Collectors.toSet()); } private static List<String> endpointIds(String endpointsBody) { var xml = "<deployment>" + " <instance id='default'>" + " <prod>" + " <region active=\"true\">us-east</region>" + " </prod>" + " <endpoints>" + endpointsBody + " </endpoints>" + " </instance>" + "</deployment>"; return DeploymentSpec.fromXml(xml).requireInstance("default").endpoints().stream() .map(Endpoint::endpointId) .toList(); } }
class DeploymentSpecTest { @Test public void simpleSpec() { String specXml = "<deployment version='1.0'>" + " <instance id='default'>" + " <test/>" + " </instance>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.requireInstance("default").steps().size()); assertFalse(spec.majorVersion().isPresent()); assertTrue(spec.requireInstance("default").steps().get(0).concerns(test)); assertTrue(spec.requireInstance("default").concerns(test, Optional.empty())); assertTrue(spec.requireInstance("default").concerns(test, Optional.of(RegionName.from("region1")))); assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty())); assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty())); assertFalse(spec.requireInstance("default").globalServiceId().isPresent()); } @Test public void specPinningMajorVersion() { String specXml = "<deployment version='1.0' major-version='6'>" + " <instance id='default'>" + " <test/>" + " </instance>" + "</deployment>"; StringReader r = new StringReader(specXml); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(specXml, spec.xmlForm()); assertEquals(1, spec.requireInstance("default").steps().size()); assertTrue(spec.majorVersion().isPresent()); assertEquals(6, (int)spec.majorVersion().get()); } @Test public void stagingSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <staging/>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(1, spec.steps().size()); assertEquals(1, spec.requireInstance("default").steps().size()); assertTrue(spec.requireInstance("default").steps().get(0).concerns(staging)); assertFalse(spec.requireInstance("default").concerns(test, Optional.empty())); assertTrue(spec.requireInstance("default").concerns(staging, Optional.empty())); 
assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty())); assertFalse(spec.requireInstance("default").globalServiceId().isPresent()); } @Test public void minimalProductionSpec() { StringReader r = new StringReader( """ <deployment version='1.0'> <instance id='default'> <prod> <region active='false'>us-east1</region> <region active='true'>us-west1</region> </prod> </instance> </deployment> """); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(1, spec.steps().size()); assertEquals(2, spec.requireInstance("default").steps().size()); assertTrue(spec.requireInstance("default").steps().get(0).concerns(prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(0)).active()); assertTrue(spec.requireInstance("default").steps().get(1).concerns(prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(1)).active()); assertFalse(spec.requireInstance("default").concerns(test, Optional.empty())); assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty())); assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-east1")))); assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-west1")))); assertFalse(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(spec.requireInstance("default").globalServiceId().isPresent()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("default").upgradePolicy()); assertEquals(DeploymentSpec.RevisionTarget.latest, spec.requireInstance("default").revisionTarget()); assertEquals(DeploymentSpec.RevisionChange.whenFailing, spec.requireInstance("default").revisionChange()); assertEquals(DeploymentSpec.UpgradeRollout.separate, spec.requireInstance("default").upgradeRollout()); assertEquals(0, 
spec.requireInstance("default").minRisk()); assertEquals(0, spec.requireInstance("default").maxRisk()); assertEquals(8, spec.requireInstance("default").maxIdleHours()); } @Test public void specWithTags() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='a' tags='tag1 tag2'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + " <instance id='b' tags='tag3'>" + " <prod>" + " <region active='false'>us-east1</region>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(Tags.fromString("tag1 tag2"), spec.requireInstance("a").tags()); assertEquals(Tags.fromString("tag3"), spec.requireInstance("b").tags()); } @Test public void maximalProductionSpec() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertCorrectFirstInstance(spec.requireInstance("default")); } @Test public void productionTests() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " <delay hours='1' />" + " <test>us-west-1</test>" + " <test>us-east-1</test>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> instanceSteps = spec.steps().get(0).steps(); assertEquals(7, instanceSteps.size()); assertEquals("test", instanceSteps.get(0).toString()); assertEquals("staging", instanceSteps.get(1).toString()); 
assertEquals("prod.us-east-1", instanceSteps.get(2).toString()); assertEquals("prod.us-west-1", instanceSteps.get(3).toString()); assertEquals("delay PT1H", instanceSteps.get(4).toString()); assertEquals("tests for prod.us-west-1", instanceSteps.get(5).toString()); assertEquals("tests for prod.us-east-1", instanceSteps.get(6).toString()); } @Test(expected = IllegalArgumentException.class) public void duplicateProductionTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-east1</region>" + " <test>us-east1</test>" + " <test>us-east1</test>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void productionTestBeforeDeployment() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod>" + " <test>us-east1</test>" + " <region active='true'>us-east1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void productionTestInParallelWithDeployment() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod>" + " <parallel>" + " <region active='true'>us-east1</region>" + " <test>us-east1</test>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void maximalProductionSpecMultipleInstances() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + " <instance id='instance2'>" + " <prod>" + " <region active='true'>us-central1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = 
DeploymentSpec.fromXml(r); assertCorrectFirstInstance(spec.requireInstance("instance1")); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(1, instance2.steps().size()); assertEquals(1, instance2.zones().size()); assertTrue(instance2.steps().get(0).concerns(prod, Optional.of(RegionName.from("us-central1")))); } @Test public void multipleInstancesShortForm() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1, instance2'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='false'>us-east1</region>" + " <delay hours='3' minutes='30'/>" + " <region active='true'>us-west1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertCorrectFirstInstance(spec.requireInstance("instance1")); assertCorrectFirstInstance(spec.requireInstance("instance2")); } private void assertCorrectFirstInstance(DeploymentInstanceSpec instance) { assertEquals(5, instance.steps().size()); assertEquals(4, instance.zones().size()); assertTrue(instance.steps().get(0).concerns(test)); assertTrue(instance.steps().get(1).concerns(staging)); assertTrue(instance.steps().get(2).concerns(prod, Optional.of(RegionName.from("us-east1")))); assertFalse(((DeploymentSpec.DeclaredZone)instance.steps().get(2)).active()); assertTrue(instance.steps().get(3) instanceof DeploymentSpec.Delay); assertEquals(3 * 60 * 60 + 30 * 60, instance.steps().get(3).delay().getSeconds()); assertTrue(instance.steps().get(4).concerns(prod, Optional.of(RegionName.from("us-west1")))); assertTrue(((DeploymentSpec.DeclaredZone)instance.steps().get(4)).active()); assertTrue(instance.concerns(test, Optional.empty())); assertTrue(instance.concerns(test, Optional.of(RegionName.from("region1")))); assertTrue(instance.concerns(staging, Optional.empty())); assertTrue(instance.concerns(prod, Optional.of(RegionName.from("us-east1")))); assertTrue(instance.concerns(prod, 
Optional.of(RegionName.from("us-west1")))); assertFalse(instance.concerns(prod, Optional.of(RegionName.from("no-such-region")))); assertFalse(instance.globalServiceId().isPresent()); } @Test public void productionSpecWithGlobalServiceId() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <prod global-service-id='query'>" + " <region active='true'>us-east-1</region>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(spec.requireInstance("default").globalServiceId(), Optional.of("query")); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInTest() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <test global-service-id='query' />" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected=IllegalArgumentException.class) public void globalServiceIdInStaging() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='default'>" + " <staging global-service-id='query' />" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void productionSpecWithGlobalServiceIdBeforeStaging() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <test/>" + " <prod global-service-id='qrs'>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </prod>" + " <staging/>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("qrs", spec.requireInstance("default").globalServiceId().get()); } @Test public void productionSpecWithUpgradeRevisionSettings() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade revision-change='when-clear' revision-target='next' min-risk='3' max-risk='12' 
max-idle-hours='32' />" + " </instance>" + " <instance id='custom'>" + " <upgrade revision-change='always' />" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("next", spec.requireInstance("default").revisionTarget().toString()); assertEquals("latest", spec.requireInstance("custom").revisionTarget().toString()); assertEquals("whenClear", spec.requireInstance("default").revisionChange().toString()); assertEquals("always", spec.requireInstance("custom").revisionChange().toString()); assertEquals(3, spec.requireInstance("default").minRisk()); assertEquals(12, spec.requireInstance("default").maxRisk()); assertEquals(32, spec.requireInstance("default").maxIdleHours()); } @Test public void productionSpecsWithIllegalRevisionSettings() { assertEquals("revision-change must be 'when-clear' when max-risk is specified, but got: 'always'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade revision-change='always' revision-target='next' min-risk='3' max-risk='12' max-idle-hours='32' />" + " </instance>" + "</deployment>")) .getMessage()); assertEquals("revision-target must be 'next' when max-risk is specified, but got: 'latest'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade revision-change='when-clear' min-risk='3' max-risk='12' max-idle-hours='32' />" + " </instance>" + "</deployment>")) .getMessage()); assertEquals("maximum risk cannot be less than minimum risk score, but got: '12'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade revision-change='when-clear' revision-target='next' min-risk='13' max-risk='12' max-idle-hours='32' />" + " </instance>" + "</deployment>")) .getMessage()); assertEquals("maximum risk cannot be less than minimum risk score, but got: '0'", 
assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <upgrade min-risk='3' />" + " </instance>" + "</deployment>")) .getMessage()); } @Test public void productionSpecWithUpgradeRollout() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade rollout='leading' />" + " </instance>" + " <instance id='aggressive'>" + " <upgrade rollout='simultaneous' />" + " </instance>" + " <instance id='custom'/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("leading", spec.requireInstance("default").upgradeRollout().toString()); assertEquals("separate", spec.requireInstance("custom").upgradeRollout().toString()); assertEquals("simultaneous", spec.requireInstance("aggressive").upgradeRollout().toString()); } @Test public void productionSpecWithUpgradePolicy() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade policy='canary'/>" + " </instance>" + " <instance id='custom'/>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.requireInstance("default").upgradePolicy().toString()); assertEquals("defaultPolicy", spec.requireInstance("custom").upgradePolicy().toString()); } @Test public void upgradePolicyDefault() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <upgrade policy='canary' rollout='leading' revision-target='next' revision-change='when-clear' />" + " <instance id='instance1'/>" + " <instance id='instance2'>" + " <upgrade policy='conservative' rollout='separate' revision-target='latest' revision-change='when-failing' />" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("canary", spec.requireInstance("instance1").upgradePolicy().toString()); assertEquals("conservative", spec.requireInstance("instance2").upgradePolicy().toString()); assertEquals("next", 
spec.requireInstance("instance1").revisionTarget().toString()); assertEquals("latest", spec.requireInstance("instance2").revisionTarget().toString()); assertEquals("whenClear", spec.requireInstance("instance1").revisionChange().toString()); assertEquals("whenFailing", spec.requireInstance("instance2").revisionChange().toString()); assertEquals("leading", spec.requireInstance("instance1").upgradeRollout().toString()); assertEquals("separate", spec.requireInstance("instance2").upgradeRollout().toString()); } @Test public void maxDelayExceeded() { try { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <upgrade policy='canary'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <delay hours='47'/>" + " <region active='true'>us-central-1</region>" + " <delay minutes='59' seconds='61'/>" + " <region active='true'>us-east-3</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); fail("Expected exception due to exceeding the max total delay"); } catch (IllegalArgumentException e) { assertEquals("The total delay specified is PT48H1S but max 48 hours is allowed", e.getMessage()); } } @Test public void onlyAthenzServiceDefinedInInstance() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <instance id='default' athenz-service='service' />" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals(1, spec.instances().size()); DeploymentInstanceSpec instance = spec.instances().get(0); assertEquals("default", instance.name().value()); assertEquals("service", instance.athenzService(prod, RegionName.defaultName()).get().value()); } @Test public void productionSpecWithParallelDeployments() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <parallel>" + " <region active='true'>us-central-1</region>" + " 
<region active='true'>us-east-3</region>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentSpec.ParallelSteps parallelSteps = ((DeploymentSpec.ParallelSteps) spec.requireInstance("default").steps().get(1)); assertEquals(2, parallelSteps.zones().size()); assertEquals(RegionName.from("us-central-1"), parallelSteps.zones().get(0).region().get()); assertEquals(RegionName.from("us-east-3"), parallelSteps.zones().get(1).region().get()); } @Test public void testAndStagingOutsideAndInsideInstance() { StringReader r = new StringReader( "<deployment>" + " <test/>" + " <staging/>" + " <instance id='instance0'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + " <instance id='instance1'>" + " <test/>" + " <staging/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(4, steps.size()); assertEquals("test", steps.get(0).toString()); assertEquals("staging", steps.get(1).toString()); assertEquals("instance 'instance0'", steps.get(2).toString()); assertEquals("instance 'instance1'", steps.get(3).toString()); List<DeploymentSpec.Step> instance0Steps = ((DeploymentInstanceSpec)steps.get(2)).steps(); assertEquals(1, instance0Steps.size()); assertEquals("prod.us-west-1", instance0Steps.get(0).toString()); List<DeploymentSpec.Step> instance1Steps = ((DeploymentInstanceSpec)steps.get(3)).steps(); assertEquals(3, instance1Steps.size()); assertEquals("test", instance1Steps.get(0).toString()); assertEquals("staging", instance1Steps.get(1).toString()); assertEquals("prod.us-west-1", instance1Steps.get(2).toString()); } @Test public void nestedParallelAndSteps() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <staging />" + " <instance id='instance' athenz-service='in-service'>" + 
" <prod>" + " <parallel>" + " <region active='true'>us-west-1</region>" + " <steps>" + " <region active='true'>us-east-3</region>" + " <delay hours='2' />" + " <region active='true'>eu-west-1</region>" + " <delay hours='2' />" + " </steps>" + " <steps>" + " <delay hours='3' />" + " <region active='true'>aws-us-east-1a</region>" + " <parallel>" + " <region active='true' athenz-service='no-service'>ap-northeast-1</region>" + " <region active='true'>ap-southeast-2</region>" + " <test>aws-us-east-1a</test>" + " </parallel>" + " </steps>" + " <delay hours='3' minutes='30' />" + " </parallel>" + " <region active='true'>us-north-7</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(2, steps.size()); assertEquals("staging", steps.get(0).toString()); assertEquals("instance 'instance'", steps.get(1).toString()); assertEquals(Duration.ofHours(4), steps.get(1).delay()); List<DeploymentSpec.Step> instanceSteps = steps.get(1).steps(); assertEquals(2, instanceSteps.size()); assertEquals("4 parallel steps", instanceSteps.get(0).toString()); assertEquals("prod.us-north-7", instanceSteps.get(1).toString()); List<DeploymentSpec.Step> parallelSteps = instanceSteps.get(0).steps(); assertEquals(4, parallelSteps.size()); assertEquals("prod.us-west-1", parallelSteps.get(0).toString()); assertEquals("4 steps", parallelSteps.get(1).toString()); assertEquals("3 steps", parallelSteps.get(2).toString()); assertEquals("delay PT3H30M", parallelSteps.get(3).toString()); List<DeploymentSpec.Step> firstSerialSteps = parallelSteps.get(1).steps(); assertEquals(4, firstSerialSteps.size()); assertEquals("prod.us-east-3", firstSerialSteps.get(0).toString()); assertEquals("delay PT2H", firstSerialSteps.get(1).toString()); assertEquals("prod.eu-west-1", firstSerialSteps.get(2).toString()); assertEquals("delay PT2H", firstSerialSteps.get(3).toString()); List<DeploymentSpec.Step> 
secondSerialSteps = parallelSteps.get(2).steps(); assertEquals(3, secondSerialSteps.size()); assertEquals("delay PT3H", secondSerialSteps.get(0).toString()); assertEquals("prod.aws-us-east-1a", secondSerialSteps.get(1).toString()); assertEquals("3 parallel steps", secondSerialSteps.get(2).toString()); List<DeploymentSpec.Step> innerParallelSteps = secondSerialSteps.get(2).steps(); assertEquals(3, innerParallelSteps.size()); assertEquals("prod.ap-northeast-1", innerParallelSteps.get(0).toString()); assertEquals("no-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-northeast-1")).get().value()); assertEquals("prod.ap-southeast-2", innerParallelSteps.get(1).toString()); assertEquals("in-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-southeast-2")).get().value()); assertEquals("tests for prod.aws-us-east-1a", innerParallelSteps.get(2).toString()); } @Test public void parallelInstances() { StringReader r = new StringReader( "<deployment>" + " <parallel>" + " <instance id='instance0'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + " <instance id='instance1'>" + " <prod>" + " <region active='true'>us-east-3</region>" + " </prod>" + " </instance>" + " </parallel>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(1, steps.size()); assertEquals("2 parallel steps", steps.get(0).toString()); List<DeploymentSpec.Step> parallelSteps = steps.get(0).steps(); assertEquals("instance 'instance0'", parallelSteps.get(0).toString()); assertEquals("instance 'instance1'", parallelSteps.get(1).toString()); } @Test public void instancesWithDelay() { StringReader r = new StringReader( "<deployment>" + " <instance id='instance0'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + " <delay hours='12'/>" + " <instance id='instance1'>" + " <prod>" + " <region 
active='true'>us-east-3</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); List<DeploymentSpec.Step> steps = spec.steps(); assertEquals(3, steps.size()); assertEquals("instance 'instance0'", steps.get(0).toString()); assertEquals("delay PT12H", steps.get(1).toString()); assertEquals("instance 'instance1'", steps.get(2).toString()); } @Test public void productionSpecWithDuplicateRegions() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " <parallel>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-central-1</region>" + " <region active='true'>us-east-3</region>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); try { DeploymentSpec.fromXml(r); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage()); } } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIncreasinglyStrictUpgradePolicies() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2' />" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIncreasinglyStrictUpgradePoliciesInParallel() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance0'/>" + " <parallel>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2'>" + " <upgrade policy='canary'/>" + " </instance>" + " </parallel>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIncreasinglyStrictUpgradePoliciesAfterParallel() { StringReader r = new StringReader( "<deployment 
version='1.0'>" + " <parallel>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2'>" + " <upgrade policy='canary'/>" + " </instance>" + " </parallel>" + " <instance id='instance3'/>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void deploymentSpecWithDifferentUpgradePoliciesInParallel() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <parallel>" + " <instance id='instance1'>" + " <upgrade policy='conservative'/>" + " </instance>" + " <instance id='instance2' />" + " </parallel>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(DeploymentSpec.UpgradePolicy.conservative, spec.requireInstance("instance1").upgradePolicy()); assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("instance2").upgradePolicy()); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIllegallyOrderedDeploymentSpec1() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " <block-change days='mon,tue' hours='15-16'/>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void deploymentSpecWithIllegallyOrderedDeploymentSpec2() { StringReader r = new StringReader( "<deployment>\n" + " <instance id='default'>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " <test/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void deploymentSpecWithChangeBlocker() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <block-change revision='false' days='mon,tue' hours='15-16'/>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " 
<block-change days='mon-sun' hours='0-23' time-zone='CET' from-date='2022-01-01' to-date='2022-01-15'/>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(3, spec.requireInstance("default").changeBlocker().size()); assertTrue(spec.requireInstance("default").changeBlocker().get(0).blocksVersions()); assertFalse(spec.requireInstance("default").changeBlocker().get(0).blocksRevisions()); assertEquals(ZoneId.of("UTC"), spec.requireInstance("default").changeBlocker().get(0).window().zone()); assertTrue(spec.requireInstance("default").changeBlocker().get(1).blocksVersions()); assertTrue(spec.requireInstance("default").changeBlocker().get(1).blocksRevisions()); assertEquals(ZoneId.of("CET"), spec.requireInstance("default").changeBlocker().get(1).window().zone()); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T14:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T15:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T16:15:30.00Z"))); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-18T17:15:30.00Z"))); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T09:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T08:15:30.00Z"))); assertTrue(spec.requireInstance("default").canUpgradeAt(Instant.parse("2017-09-23T10:15:30.00Z"))); assertFalse(spec.requireInstance("default").canUpgradeAt(Instant.parse("2022-01-15T16:00:00.00Z"))); } @Test public void changeBlockerInheritance() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <block-change revision='false' days='mon,tue' hours='15-16'/>" + " <instance id='instance1'>" + " <block-change days='sat' hours='10' time-zone='CET'/>" + " </instance>" + " <instance 
id='instance2'>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); String inheritedChangeBlocker = "change blocker revision=false version=true window=time window for hour(s) " + "[15, 16] on [monday, tuesday] in time zone UTC and date range [any date, any date]"; assertEquals(2, spec.requireInstance("instance1").changeBlocker().size()); assertEquals(inheritedChangeBlocker, spec.requireInstance("instance1").changeBlocker().get(0).toString()); assertEquals("change blocker revision=true version=true window=time window for hour(s) [10] on " + "[saturday] in time zone CET and date range [any date, any date]", spec.requireInstance("instance1").changeBlocker().get(1).toString()); assertEquals(1, spec.requireInstance("instance2").changeBlocker().size()); assertEquals(inheritedChangeBlocker, spec.requireInstance("instance2").changeBlocker().get(0).toString()); } @Test public void athenzConfigIsReadFromDeployment() { StringReader r = new StringReader( "<deployment athenz-domain='domain' athenz-service='service'>" + " <instance id='instance1'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals("service", spec.athenzService().get().value()); assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value()); } @Test public void athenzConfigPropagatesThroughParallelZones() { StringReader r = new StringReader( "<deployment athenz-domain='domain' athenz-service='service'>" + " <instance id='instance1'>" + " <prod athenz-service='prod-service'>" + " <region active='true'>us-central-1</region>" + " <parallel>" + " <region active='true'>us-west-1</region>" + " <region active='true'>us-east-3</region>" + " </parallel>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = 
DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals("service", spec.athenzService().get().value()); assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-central-1")).get().value()); assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value()); assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-east-3")).get().value()); } @Test public void athenzConfigPropagatesThroughParallelZonesAndInstances() { String r = """ <deployment athenz-domain='domain' athenz-service='service'> <parallel> <instance id='instance1'> <prod> <parallel> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </parallel> </prod> </instance> <instance id='instance2'> <prod> <parallel> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </parallel> </prod> </instance> </parallel> </deployment> """; DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-west-1")).get().value()); assertEquals("service", spec.requireInstance("instance1").athenzService(prod, RegionName.from("us-east-3")).get().value()); assertEquals("service", spec.requireInstance("instance2").athenzService(prod, RegionName.from("us-east-3")).get().value()); } @Test public void athenzConfigIsReadFromInstance() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <instance id='default' athenz-service='service'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("domain", spec.athenzDomain().get().value()); assertEquals(Optional.empty(), spec.athenzService()); 
assertEquals("service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value()); } @Test public void athenzServiceIsOverriddenFromEnvironment() { StringReader r = new StringReader( "<deployment athenz-domain='domain' athenz-service='unused-service'>" + " <instance id='default' athenz-service='service'>" + " <test />" + " <staging athenz-service='staging-service' />" + " <prod athenz-service='prod-service'>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals("service", spec.requireInstance("default").athenzService(test, RegionName.from("us-east-1")).get().value()); assertEquals("staging-service", spec.requireInstance("default").athenzService(staging, RegionName.from("us-north-1")).get().value()); assertEquals("prod-service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value()); } @Test(expected = IllegalArgumentException.class) public void missingAthenzServiceFails() { StringReader r = new StringReader( "<deployment athenz-domain='domain'>" + " <instance id='default'>" + " <prod>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test(expected = IllegalArgumentException.class) public void athenzServiceWithoutDomainFails() { StringReader r = new StringReader( "<deployment>" + " <instance id='default'>" + " <prod athenz-service='service'>" + " <region active='true'>us-west-1</region>" + " </prod>" + " </instance>" + "</deployment>" ); DeploymentSpec.fromXml(r); } @Test public void noNotifications() { assertEquals(Notifications.none(), DeploymentSpec.fromXml("<deployment>" + " <instance id='default'/>" + "</deployment>").requireInstance("default").notifications()); } @Test public void emptyNotifications() { DeploymentSpec spec = DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " 
<notifications/>" + " </instance>" + "</deployment>"); assertEquals(Notifications.none(), spec.requireInstance("default").notifications()); } @Test public void someNotifications() { DeploymentSpec spec = DeploymentSpec.fromXml("<deployment>\n" + " <instance id='default'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@dev\" when=\"failing-commit\"/>" + " <email address=\"jane@dev\"/>" + " </notifications>" + " </instance>" + "</deployment>"); assertEquals(ImmutableSet.of(author), spec.requireInstance("default").notifications().emailRolesFor(failing)); assertEquals(ImmutableSet.of(author), spec.requireInstance("default").notifications().emailRolesFor(failingCommit)); assertEquals(ImmutableSet.of("john@dev", "jane@dev"), spec.requireInstance("default").notifications().emailAddressesFor(failingCommit)); assertEquals(ImmutableSet.of("jane@dev"), spec.requireInstance("default").notifications().emailAddressesFor(failing)); } @Test public void notificationsWithMultipleInstances() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <instance id='instance1'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@operator\"/>" + " </notifications>" + " </instance>" + " <instance id='instance2'>" + " <notifications when=\"failing-commit\">" + " <email role=\"author\"/>" + " <email address=\"mary@dev\"/>" + " </notifications>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentInstanceSpec instance1 = spec.requireInstance("instance1"); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failing)); assertEquals(Set.of("john@operator"), instance1.notifications().emailAddressesFor(failing)); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("mary@dev"), 
instance2.notifications().emailAddressesFor(failingCommit)); } @Test public void notificationsDefault() { StringReader r = new StringReader( "<deployment version='1.0'>" + " <notifications>" + " <email role=\"author\" when=\"failing\"/>" + " <email address=\"mary@dev\"/>" + " </notifications>" + " <instance id='instance1'>" + " <notifications when=\"failing\">" + " <email role=\"author\"/>" + " <email address=\"john@operator\" when=\"failing-commit\"/>" + " </notifications>" + " </instance>" + " <instance id='instance2'>" + " </instance>" + "</deployment>" ); DeploymentSpec spec = DeploymentSpec.fromXml(r); DeploymentInstanceSpec instance1 = spec.requireInstance("instance1"); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failing)); assertEquals(Set.of(), instance1.notifications().emailAddressesFor(failing)); assertEquals(Set.of(author), instance1.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("john@operator"), instance1.notifications().emailAddressesFor(failingCommit)); DeploymentInstanceSpec instance2 = spec.requireInstance("instance2"); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failing)); assertEquals(Set.of(), instance2.notifications().emailAddressesFor(failing)); assertEquals(Set.of(author), instance2.notifications().emailRolesFor(failingCommit)); assertEquals(Set.of("mary@dev"), instance2.notifications().emailAddressesFor(failingCommit)); } @Test public void customTesterFlavor() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <test tester-flavor="d-1-4-20" /> <staging /> <prod tester-flavor="d-2-8-50"> <region active="false">us-north-7</region> </prod> </instance> </deployment>"""); assertEquals(Optional.of("d-1-4-20"), spec.requireInstance("default").steps().get(0).zones().get(0).testerFlavor()); assertEquals(Optional.empty(), spec.requireInstance("default").steps().get(1).zones().get(0).testerFlavor()); assertEquals(Optional.of("d-2-8-50"), 
spec.requireInstance("default").steps().get(2).zones().get(0).testerFlavor()); } @Test public void noEndpoints() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'/> </deployment> """); assertEquals(Collections.emptyList(), spec.requireInstance("default").endpoints()); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), defaultId(), ClusterSpec.Id.from("cluster"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), com.yahoo.config.provision.zone.ZoneId.from("test", "us"), ClusterSpec.Id.from("cluster"))); } @Test public void emptyEndpoints() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <endpoints/> </instance> </deployment>"""); assertEquals(List.of(), spec.requireInstance("default").endpoints()); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.defaultName(), defaultId(), ClusterSpec.Id.from("cluster"))); } @Test public void someEndpoints() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region active="true">us-east</region> </prod> <endpoints> <endpoint id="foo" container-id="bar"> <region>us-east</region> </endpoint> <endpoint id="nalle" container-id="frosk" /> <endpoint container-id="quux" /> <endpoint container-id='bax' type='zone' enabled='true' /> <endpoint container-id='froz' type='zone' enabled='false' /> <endpoint container-id='froz' type='private'> <region>us-east</region> <allow with='aws-private-link' arn='barn' /> <allow with='gcp-service-connect' project='nine' /> </endpoint> </endpoints> </instance> </deployment>"""); assertEquals( List.of("foo", "nalle", "default"), spec.requireInstance("default").endpoints().stream().map(Endpoint::endpointId).toList() ); assertEquals( List.of("bar", "frosk", "quux"), spec.requireInstance("default").endpoints().stream().map(Endpoint::containerId).toList() ); assertEquals(List.of(RegionName.from("us-east")), 
spec.requireInstance("default").endpoints().get(0).regions()); var zone = from(prod, RegionName.from("us-east")); var testZone = from(test, RegionName.from("us-east")); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("custom"), zone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), defaultId(), ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), zone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.defaultEndpoint, spec.zoneEndpoint(InstanceName.from("default"), testZone, ClusterSpec.Id.from("bax"))); assertEquals(ZoneEndpoint.privateEndpoint, spec.zoneEndpoint(InstanceName.from("default"), testZone, ClusterSpec.Id.from("froz"))); assertEquals(new ZoneEndpoint(false, true, List.of(new AllowedUrn(AccessType.awsPrivateLink, "barn"), new AllowedUrn(AccessType.gcpServiceConnect, "nine"))), spec.zoneEndpoint(InstanceName.from("default"), zone, ClusterSpec.Id.from("froz"))); } @Test public void invalidEndpoints() { assertInvalidEndpoints("<endpoint id='FOO' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'FOO'"); assertInvalidEndpoints("<endpoint id='123' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got '123'"); assertInvalidEndpoints("<endpoint id='foo!' 
container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo!'"); assertInvalidEndpoints("<endpoint id='foo.bar' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo.bar'"); assertInvalidEndpoints("<endpoint id='foo--bar' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo--bar'"); assertInvalidEndpoints("<endpoint id='foo-' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foo-'"); assertInvalidEndpoints("<endpoint id='foooooooooooo' container-id='qrs'/>", "Endpoint id must be all lowercase, alphanumeric, with no consecutive dashes, of length 1 to 12, and begin with a character; but got 'foooooooooooo'"); assertInvalidEndpoints("<endpoint id='foo' container-id='qrs'/><endpoint id='foo' container-id='qrs'/>", "Endpoint id 'foo' is specified multiple times"); assertInvalidEndpoints("<endpoint id='default' type='zone' container-id='foo' />", "Instance-level endpoint 'default': cannot declare 'id' with type 'zone' or 'private'"); assertInvalidEndpoints("<endpoint id='default' type='private' container-id='foo' />", "Instance-level endpoint 'default': cannot declare 'id' with type 'zone' or 'private'"); assertInvalidEndpoints("<endpoint type='zone' />", "Missing required attribute 'container-id' in 'endpoint'"); assertInvalidEndpoints("<endpoint type='private' />", "Missing required attribute 'container-id' in 'endpoint'"); assertInvalidEndpoints("<endpoint container-id='foo' type='zone'><allow /></endpoint>", "Instance-level endpoint 'default': only endpoints of type 'private' can specify 'allow' children"); assertInvalidEndpoints("<endpoint 
type='private' container-id='foo' enabled='true' />", "Instance-level endpoint 'default': only endpoints of type 'zone' can specify 'enabled'"); assertInvalidEndpoints("<endpoint type='zone' container-id='qrs'/><endpoint type='zone' container-id='qrs'/>", "Multiple zone endpoints (for all regions) declared for container id 'qrs'"); assertInvalidEndpoints("<endpoint type='private' container-id='qrs'><region>us</region></endpoint>" + "<endpoint type='private' container-id='qrs'><region>us</region></endpoint>", "Multiple private endpoints declared for container id 'qrs' in region 'us'"); assertInvalidEndpoints("<endpoint type='zone' container-id='qrs' />" + "<endpoint type='zone' container-id='qrs'><region>us</region></endpoint>", "Zone endpoint for container id 'qrs' declared both with region 'us', and for all regions."); } @Test public void validEndpoints() { assertEquals(List.of("default"), endpointIds("<endpoint container-id='qrs'/>")); assertEquals(List.of("default"), endpointIds("<endpoint id='' container-id='qrs'/>")); assertEquals(List.of("f"), endpointIds("<endpoint id='f' container-id='qrs'/>")); assertEquals(List.of("foo"), endpointIds("<endpoint id='foo' container-id='qrs'/>")); assertEquals(List.of("foo-bar"), endpointIds("<endpoint id='foo-bar' container-id='qrs'/>")); assertEquals(List.of("foo", "bar"), endpointIds("<endpoint id='foo' container-id='qrs'/><endpoint id='bar' container-id='qrs'/>")); assertEquals(List.of("fooooooooooo"), endpointIds("<endpoint id='fooooooooooo' container-id='qrs'/>")); } @Test public void endpointDefaultRegions() { var spec = DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region>us-east</region> <region>us-west</region> </prod> <endpoints> <endpoint id="foo" container-id="bar"> <region>us-east</region> </endpoint> <endpoint container-id="bar" type='private'> <region>us-east</region> </endpoint> <endpoint id="nalle" container-id="frosk" /> <endpoint container-id="quux" /> <endpoint 
container-id="quux" type='private' /> </endpoints> </instance> </deployment>"""); assertEquals(Set.of("us-east"), endpointRegions("foo", spec)); assertEquals(Set.of("us-east", "us-west"), endpointRegions("nalle", spec)); assertEquals(Set.of("us-east", "us-west"), endpointRegions("default", spec)); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-east"), ClusterSpec.Id.from("bar"))); assertEquals(new ZoneEndpoint(true, false, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-west"), ClusterSpec.Id.from("bar"))); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-east"), ClusterSpec.Id.from("quux"))); assertEquals(new ZoneEndpoint(true, true, List.of()), spec.zoneEndpoint(InstanceName.from("default"), from("prod", "us-west"), ClusterSpec.Id.from("quux"))); assertEquals(new HashSet<>() {{ add(null); add(from("prod", "us-east")); }}, spec.requireInstance("default").zoneEndpoints().get(ClusterSpec.Id.from("bar")).keySet()); assertEquals(new HashSet<>() {{ add(null); }}, spec.requireInstance("default").zoneEndpoints().get(ClusterSpec.Id.from("quux")).keySet()); assertEquals(Set.of(ClusterSpec.Id.from("bar"), ClusterSpec.Id.from("quux")), spec.requireInstance("default").zoneEndpoints().keySet()); } @Test public void instanceEndpointDisallowsRegionAttributeOrInstanceTag() { String xmlForm = """ <deployment> <instance id='default'> <prod> <region active="true">us-east</region> <region active="true">us-west</region> </prod> <endpoints> <endpoint container-id="bar" %s> %s </endpoint> </endpoints> </instance> </deployment>"""; assertInvalid(String.format(xmlForm, "id='foo' region='us-east'", "<region>us-east</region>"), "Instance-level endpoint 'foo': invalid 'region' attribute"); assertInvalid(String.format(xmlForm, "id='foo'", "<instance>us-east</instance>"), "Instance-level endpoint 'foo': invalid element 
'instance'"); assertInvalid(String.format(xmlForm, "type='zone'", "<instance>us-east</instance>"), "Instance-level endpoint 'default': invalid element 'instance'"); assertInvalid(String.format(xmlForm, "type='private'", "<instance>us-east</instance>"), "Instance-level endpoint 'default': invalid element 'instance'"); } @Test public void applicationLevelEndpointValidation() { String xmlForm = """ <deployment> <instance id="beta"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <instance id="main"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <endpoints> <endpoint id="foo" container-id="qrs" %s> <instance %s %s>%s</instance> %s </endpoint> </endpoints> </deployment> """; assertInvalid(String.format(xmlForm, "", "weight='1'", "", "main", ""), "'region' attribute must be declared on either <endpoint> or <instance> tag"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "region='us-west-1'", "main", ""), "'region' attribute must be declared on either <endpoint> or <instance> tag"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "", "", "main", ""), "Missing required attribute 'weight' in 'instance"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "", "", ""), "Application-level endpoint 'foo': empty 'instance' element"); assertInvalid(String.format(xmlForm, "region='invalid'", "weight='1'", "", "main", ""), "Application-level endpoint 'foo': targets undeclared region 'invalid' in instance 'main'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='foo'", "", "main", ""), "Application-level endpoint 'foo': invalid weight value 'foo'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", "weight='1'", "", "main", "<region>us-east-3</region>"), "Application-level endpoint 'foo': invalid element 'region'"); assertInvalid(String.format(xmlForm, "region='us-west-1'", 
"weight='0'", "", "main", ""), "Application-level endpoint 'foo': sum of all weights must be positive, got 0"); assertInvalid(String.format(xmlForm, "type='zone'", "weight='1'", "", "main", ""), "Endpoints at application level cannot be of type 'zone'"); assertInvalid(String.format(xmlForm, "type='private'", "weight='1'", "", "main", ""), "Endpoints at application level cannot be of type 'private'"); } @Test public void cannotTargetDisabledEndpoints() { assertEquals("Instance-level endpoint 'default': all eligible zone endpoints have 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint container-id='id' /> <endpoint type='zone' container-id='id' enabled='false' /> </endpoints> </instance> </deployment> """)) .getMessage()); assertEquals("Instance-level endpoint 'default': targets zone endpoint in 'us' with 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint container-id='id'> <region>us</region> </endpoint> <endpoint type='zone' container-id='id' enabled='false' /> </endpoints> </instance> </deployment> """)) .getMessage()); assertEquals("Application-level endpoint 'default': targets 'us' in 'default', but its zone endpoint has 'enabled' set to 'false'", assertThrows(IllegalArgumentException.class, () -> DeploymentSpec.fromXml(""" <deployment> <instance id="default"> <prod> <region>us</region> <region>eu</region> </prod> <endpoints> <endpoint type='zone' container-id='id' enabled='false'> <region>us</region> </endpoint> </endpoints> </instance> <endpoints> <endpoint container-id='id' region='us'> <instance weight='1'>default</instance> </endpoint> </endpoints> </deployment> """)) .getMessage()); } @Test public void 
applicationLevelEndpoint() { DeploymentSpec spec = DeploymentSpec.fromXml(""" <deployment> <instance id="beta"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> </instance> <instance id="main"> <prod> <region active='true'>us-west-1</region> <region active='true'>us-east-3</region> </prod> <endpoints> <endpoint id="glob" container-id="music"/> </endpoints> </instance> <endpoints> <endpoint id="foo" container-id="movies" region='us-west-1'> <instance weight="2">beta</instance> <instance weight="8">main</instance> </endpoint> <endpoint id="bar" container-id="music" region='us-east-3'> <instance weight="10">main</instance> </endpoint> <endpoint id="baz" container-id="moose"> <instance weight="1" region='us-west-1'>main</instance> <instance weight="2" region='us-east-3'>main</instance> <instance weight="3" region='us-west-1'>beta</instance> </endpoint> </endpoints> </deployment> """); assertEquals(List.of(new Endpoint("foo", "movies", Level.application, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("beta"), 2), new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 8))), new Endpoint("bar", "music", Level.application, List.of(new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 10))), new Endpoint("baz", "moose", Level.application, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 1), new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 2), new Target(RegionName.from("us-west-1"), InstanceName.from("beta"), 3)))), spec.endpoints()); assertEquals(List.of(new Endpoint("glob", "music", Level.instance, List.of(new Target(RegionName.from("us-west-1"), InstanceName.from("main"), 1), new Target(RegionName.from("us-east-3"), InstanceName.from("main"), 1)))), spec.requireInstance("main").endpoints()); } @Test public void disallowExcessiveUpgradeBlocking() { List<String> specs = List.of( """ <deployment> <block-change/> </deployment>""", """ 
<deployment> <block-change days="mon-wed"/> <block-change days="tue-sun"/> </deployment>""", """ <deployment> <block-change to-date="2023-01-01"/> </deployment>""", """ <deployment> <block-change days="sat-sun"/> <block-change days="mon-fri" hours="0-10" from-date="2023-01-01" to-date="2023-01-15"/> <block-change days="mon-fri" hours="11-23" from-date="2023-01-01" to-date="2023-01-15"/> <block-change from-date="2023-01-14" to-date="2023-01-31"/></deployment>""" ); ManualClock clock = new ManualClock(); clock.setInstant(Instant.parse("2022-01-05T15:00:00.00Z")); for (var spec : specs) { assertInvalid(spec, "Cannot block Vespa upgrades for longer than 21 consecutive days", clock); } } @Test public void testDeployableHash() { assertEquals(DeploymentSpec.fromXml(""" <deployment> <instance id='default' /> </deployment>""").deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' tags=' '> <test /> <staging tester-flavor='2-8-50' /> <block-change days='mon' /> <upgrade policy='canary' revision-target='next' revision-change='when-clear' rollout='simultaneous' /> <prod /> <notifications> <email role='author' /> <email address='dev@duff' /> </notifications> </instance> </deployment>""").deployableHashCode()); assertEquals(DeploymentSpec.fromXml(""" <deployment> <parallel> <instance id='one'> <prod> <region>name</region> </prod> </instance> <instance id='two' /> </parallel> </deployment>""").deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='one'> <prod> <steps> <region>name</region> <delay hours='3' /> <test>name</test> </steps> </prod> </instance> <instance id='two' /></deployment>""").deployableHashCode()); String referenceSpec = """ <deployment> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>"""; assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml("<deployment />").deployableHashCode()); 
assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' /> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default' tags='tag1'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='custom'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='custom'> <prod> <region>other</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment major-version='9'> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain' athenz-service='service'> <instance id='default'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain'> <instance id='default' athenz-service='service'> <prod> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment athenz-domain='domain'> <instance id='default'> <prod athenz-service='prod'> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); 
assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod global-service-id='service'> <region>name</region> </prod> </instance> </deployment>""").deployableHashCode()); assertNotEquals(DeploymentSpec.fromXml(referenceSpec).deployableHashCode(), DeploymentSpec.fromXml(""" <deployment> <instance id='default'> <prod> <region>name</region> </prod> <endpoints> <endpoint container-id="quux" /> </endpoints> </instance> </deployment>""").deployableHashCode()); } @Test @Test public void hostTTL() { String r = """ <deployment version='1.0' cloud-account='100000000000' empty-host-ttl='1h'> <instance id='alpha'> <staging /> <prod empty-host-ttl='1m'> <region>us-east</region> <region empty-host-ttl='2m'>us-west</region> <test>us-east</test> <test empty-host-ttl='3m'>us-west</test> </prod> </instance> <instance id='beta'> <staging empty-host-ttl='3d'/> <perf empty-host-ttl='4h'/> <prod> <region>us-east</region> <region empty-host-ttl='0d'>us-west</region> </prod> </instance> <instance id='gamma' empty-host-ttl='6h'> <dev empty-host-ttl='7d'/> <prod> <region>us-east</region> </prod> </instance> </deployment> """; DeploymentSpec spec = DeploymentSpec.fromXml(r); assertEquals(Map.of(AWS, CloudAccount.from("100000000000")), spec.cloudAccounts()); assertHostTTL(Duration.ofHours(1), spec, "alpha", test, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", staging, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", dev, null); assertHostTTL(Duration.ofHours(1), spec, "alpha", perf, null); assertHostTTL(Duration.ofMinutes(1), spec, "alpha", prod, "us-east"); assertHostTTL(Duration.ofMinutes(2), spec, "alpha", prod, "us-west"); assertEquals(Optional.of(Duration.ofMinutes(1)), spec.requireInstance("alpha").steps().stream() .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-east"))) && step.isTest()) .findFirst().orElseThrow() .hostTTL()); 
assertEquals(Optional.of(Duration.ofMinutes(3)), spec.requireInstance("alpha").steps().stream() .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-west"))) && step.isTest()) .findFirst().orElseThrow() .hostTTL()); assertHostTTL(Duration.ofHours(1), spec, "beta", test, null); assertHostTTL(Duration.ofDays(3), spec, "beta", staging, null); assertHostTTL(Duration.ofHours(1), spec, "beta", dev, null); assertHostTTL(Duration.ofHours(4), spec, "beta", perf, null); assertHostTTL(Duration.ofHours(1), spec, "beta", prod, "us-east"); assertHostTTL(Duration.ZERO, spec, "beta", prod, "us-west"); assertHostTTL(Duration.ofHours(6), spec, "gamma", test, null); assertHostTTL(Duration.ofHours(6), spec, "gamma", staging, null); assertHostTTL(Duration.ofDays(7), spec, "gamma", dev, null); assertHostTTL(Duration.ofHours(6), spec, "gamma", perf, null); assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-east"); assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-west"); assertHostTTL(Duration.ofHours(1), spec, "nope", test, null); assertHostTTL(Duration.ofHours(1), spec, "nope", staging, null); assertHostTTL(Duration.ofHours(1), spec, "nope", dev, null); assertHostTTL(Duration.ofHours(1), spec, "nope", perf, null); assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-east"); assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-west"); } private void assertCloudAccount(String expected, DeploymentSpec spec, CloudName cloud, String instance, Environment environment, String region) { assertEquals(CloudAccount.from(expected), spec.cloudAccount(cloud, InstanceName.from(instance), com.yahoo.config.provision.zone.ZoneId.from(environment, RegionName.from(region)))); } private void assertHostTTL(Duration expected, DeploymentSpec spec, String instance, Environment environment, String region) { assertEquals(Optional.of(expected), spec.hostTTL(InstanceName.from(instance), environment, region == null ? 
        RegionName.defaultName() : RegionName.from(region)));
    }

    // Asserts that parsing the given deployment spec fails with a message containing errorMessagePart,
    // using a fresh ManualClock (time-dependent validation, e.g. block-change windows, sees a fixed "now").
    private static void assertInvalid(String deploymentSpec, String errorMessagePart) {
        assertInvalid(deploymentSpec, errorMessagePart, new ManualClock());
    }

    // Same as above, with an explicit clock so callers can pin validation to a chosen instant.
    // Guards against an empty errorMessagePart, which would make the contains() check vacuously pass.
    private static void assertInvalid(String deploymentSpec, String errorMessagePart, Clock clock) {
        if (errorMessagePart.isEmpty()) throw new IllegalArgumentException("Message part must be non-empty");
        try {
            new DeploymentSpecXmlReader(true, clock).read(deploymentSpec);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertTrue("\"" + e.getMessage() + "\" contains \"" + errorMessagePart + "\"", e.getMessage().contains(errorMessagePart));
        }
    }

    // Asserts that parsing a spec containing the given <endpoints> body fails with exactly this message.
    private static void assertInvalidEndpoints(String endpointsBody, String error) {
        assertEquals(error,
                     assertThrows(IllegalArgumentException.class,
                                  () -> endpointIds(endpointsBody))
                             .getMessage());
    }

    // Returns the set of region names targeted by the endpoint with the given id, in the 'default' instance.
    private static Set<String> endpointRegions(String endpointId, DeploymentSpec spec) {
        return spec.requireInstance("default").endpoints().stream()
                   .filter(endpoint -> endpoint.endpointId().equals(endpointId))
                   .flatMap(endpoint -> endpoint.regions().stream())
                   .map(RegionName::value)
                   .collect(Collectors.toSet());
    }

    // Wraps the given <endpoints> body in a minimal single-instance spec, parses it,
    // and returns the declared endpoint ids in declaration order.
    private static List<String> endpointIds(String endpointsBody) {
        var xml = "<deployment>" +
                  " <instance id='default'>" +
                  " <prod>" +
                  " <region active=\"true\">us-east</region>" +
                  " </prod>" +
                  " <endpoints>" +
                  endpointsBody +
                  " </endpoints>" +
                  " </instance>" +
                  "</deployment>";
        return DeploymentSpec.fromXml(xml).requireInstance("default").endpoints().stream()
                             .map(Endpoint::endpointId)
                             .toList();
    }

}